id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
148,348 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/orm/test_orm_extended_declarative_base_2_single_api.py
|
test_orm_extended_declarative_base_2_single_api.SingleOperationBaseTest
|
class SingleOperationBaseTest(BaseCrudTest):
def method_level_data_setup(self):
self.delete_all_data_in_orm_table()
def method_level_data_teardown(self):
self.delete_all_data_in_orm_table()
def test_by_pk(self):
with orm.Session(self.eng) as ses:
ses.add(User(id=1, name="alice"))
ses.commit()
assert User.by_pk(self.eng, 1).name == "alice"
assert User.by_pk(self.eng, (1,)).name == "alice"
assert (
User.by_pk(
self.eng,
[
1,
],
).name
== "alice"
)
assert User.by_pk(self.eng, 0) is None
with orm.Session(self.eng) as ses:
assert User.by_pk(ses, 1).name == "alice"
assert User.by_pk(ses, (1,)).name == "alice"
assert (
User.by_pk(
ses,
[
1,
],
).name
== "alice"
)
assert User.by_pk(ses, 0) is None
with orm.Session(self.eng) as ses:
ses.add(Association(x_id=1, y_id=2, flag=999))
ses.commit()
assert Association.by_pk(self.eng, (1, 2)).flag == 999
assert Association.by_pk(self.eng, [1, 2]).flag == 999
assert Association.by_pk(self.eng, (0, 0)) is None
with orm.Session(self.eng) as ses:
assert Association.by_pk(ses, (1, 2)).flag == 999
assert Association.by_pk(ses, [1, 2]).flag == 999
assert Association.by_pk(ses, [0, 0]) is None
def test_by_sql(self):
assert User.count_all(self.eng) == 0
with orm.Session(self.eng) as ses:
user_list = [
User(id=1, name="mr x"),
User(id=2, name="mr y"),
User(id=3, name="mr z"),
]
ses.add_all(user_list)
ses.commit()
assert User.count_all(self.eng) == 3
expected = ["mr y", "mr z"]
results = User.by_sql(
self.eng,
"""
SELECT *
FROM extended_declarative_base_user t
WHERE t.id >= 2
""",
)
assert [user.name for user in results] == expected
results = User.by_sql(
self.eng,
sa.text(
"""
SELECT *
FROM extended_declarative_base_user t
WHERE t.id >= 2
"""
),
)
assert [user.name for user in results] == expected
|
class SingleOperationBaseTest(BaseCrudTest):
def method_level_data_setup(self):
pass
def method_level_data_teardown(self):
pass
def test_by_pk(self):
pass
def test_by_sql(self):
pass
| 5 | 0 | 20 | 2 | 19 | 0 | 1 | 0 | 1 | 2 | 2 | 2 | 4 | 0 | 4 | 15 | 85 | 9 | 76 | 10 | 71 | 0 | 40 | 8 | 35 | 1 | 1 | 1 | 4 |
148,349 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/orm/test_orm_extended_declarative_base_2_single_api.py
|
test_orm_extended_declarative_base_2_single_api.TestExtendedBaseOnPostgres
|
class TestExtendedBaseOnPostgres(SingleOperationBaseTest): # test on postgres
engine = engine_psql
|
class TestExtendedBaseOnPostgres(SingleOperationBaseTest):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 2 | 0 | 2 | 2 | 1 | 1 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
148,350 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/orm/test_orm_extended_declarative_base_2_single_api.py
|
test_orm_extended_declarative_base_2_single_api.TestExtendedBaseOnSqlite
|
class TestExtendedBaseOnSqlite(SingleOperationBaseTest): # test on sqlite
engine = engine_sqlite
|
class TestExtendedBaseOnSqlite(SingleOperationBaseTest):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 2 | 0 | 2 | 2 | 1 | 1 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
148,351 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/orm/test_orm_extended_declarative_base_3_bulk_api.py
|
test_orm_extended_declarative_base_3_bulk_api.BulkOperationTestBase
|
class BulkOperationTestBase(BaseCrudTest):
def method_level_data_teardown(self):
self.delete_all_data_in_orm_table()
def test_smart_insert(self):
"""
Test performance of smart insert.
**中文文档**
测试smart_insert的基本功能, 以及与普通的insert比较性能。
"""
# ------ Before State ------
scale = 10
n_exist = scale
n_all = scale**3
exist_id_list = [random.randint(1, n_all) for _ in range(n_exist)]
exist_id_list = list(set(exist_id_list))
exist_id_list.sort()
n_exist = len(exist_id_list)
# user smart insert
exist_data = [User(id=user_id) for user_id in exist_id_list]
all_data = [User(id=user_id) for user_id in range(1, 1 + n_all)]
with orm.Session(self.engine) as ses:
ses.add_all(exist_data)
ses.commit()
assert User.count_all(ses) == n_exist
# ------ Invoke ------
st = time.process_time()
with orm.Session(self.engine) as ses:
op_counter, insert_counter = User.smart_insert(ses, all_data)
elapse1 = time.process_time() - st
assert User.count_all(ses) == n_all
# ------ After State ------
assert op_counter <= (0.5 * n_all)
assert insert_counter == (n_all - n_exist)
# user regular insert
# ------ Before State ------
with self.eng.connect() as conn:
conn.execute(User.__table__.delete())
conn.commit()
exist_data = [User(id=id) for id in exist_id_list]
all_data = [User(id=id) for id in range(1, 1 + n_all)]
with orm.Session(self.engine) as ses:
ses.add_all(exist_data)
ses.commit()
assert User.count_all(ses) == n_exist
st = time.process_time()
with orm.Session(self.engine) as ses:
for user in all_data:
try:
ses.add(user)
ses.commit()
except IntegrityError:
ses.rollback()
except FlushError:
ses.rollback()
elapse2 = time.process_time() - st
assert User.count_all(ses) == n_all
assert elapse1 < elapse2
def test_smart_insert_single_object(self):
assert User.count_all(self.eng) == 0
user = User(id=1)
User.smart_insert(self.eng, user)
assert User.count_all(self.eng) == 1
user = User(id=1)
User.smart_insert(self.eng, user)
assert User.count_all(self.eng) == 1
def test_smart_update(self):
# single primary key column
# ------ Before State ------
User.smart_insert(self.eng, [User(id=1)])
with orm.Session(self.eng) as ses:
assert ses.get(User, 1).name == None
# ------ Invoke ------
# update
update_count, insert_count = User.update_all(
self.eng,
[
User(id=1, name="Alice"), # this is update
User(id=2, name="Bob"), # this is not insert
],
)
# ------ After State ------
assert update_count == 1
assert insert_count == 0
with orm.Session(self.eng) as ses:
assert User.count_all(ses) == 1 # User(Bob) not inserted
assert ses.get(User, 1).name == "Alice"
assert ses.get(User, 2) == None
# ------ Invoke ------
# upsert
with orm.Session(self.eng) as ses:
update_count, insert_count = User.upsert_all(
ses,
[
User(id=1, name="Adam"),
User(id=2, name="Bob"),
],
)
# ------ After State ------
assert update_count == 1
assert insert_count == 1
with orm.Session(self.eng) as ses:
assert User.count_all(ses) == 2 # User(Bob) got inserted
assert ses.get(User, 1).name == "Adam"
assert ses.get(User, 2).name == "Bob"
# multiple primary key columns
# ------ Before State ------
Association.smart_insert(self.eng, Association(x_id=1, y_id=1, flag=0))
assert Association.by_pk(self.eng, (1, 1)).flag == 0
# ------ Invoke ------
# update
with orm.Session(self.eng) as ses:
update_counter, insert_counter = Association.update_all(
ses,
[
Association(x_id=1, y_id=1, flag=1), # this is update
Association(x_id=1, y_id=2, flag=2), # this is not insert
],
)
# ------ After State ------
assert update_counter == 1
assert insert_counter == 0
with orm.Session(self.eng) as ses:
assert Association.count_all(ses) == 1
assert ses.get(Association, (1, 1)).flag == 1
assert ses.get(Association, (1, 2)) is None
# ------ Invoke ------
# upsert
with orm.Session(self.eng) as ses:
update_count, insert_count = Association.upsert_all(
ses,
[
Association(x_id=1, y_id=1, flag=999),
Association(x_id=1, y_id=2, flag=2),
],
)
# ------ After State ------
assert update_count == 1
assert insert_count == 1
with orm.Session(self.eng) as ses:
assert Association.count_all(ses) == 2
assert ses.get(Association, (1, 1)).flag == 999
assert ses.get(Association, (1, 2)).flag == 2
def test_select_all(self):
with orm.Session(self.eng) as ses:
ses.add_all(
[
User(id=1),
User(id=2),
User(id=3),
]
)
ses.commit()
user_list = User.select_all(self.eng)
assert len(user_list) == 3
assert isinstance(user_list[0], User)
user_list = User.select_all(ses)
assert len(user_list) == 3
assert isinstance(user_list[0], User)
def test_random_sample(self):
n_order = 1000
Order.smart_insert(self.eng, [Order(id=id) for id in range(1, n_order + 1)])
result1 = Order.random_sample(self.eng, limit=5)
assert len(result1) == 5
with orm.Session(self.eng) as ses:
result2 = Order.random_sample(ses, limit=5)
assert len(result2) == 5
assert sum([od1.id != od2.id for od1, od2 in zip(result1, result2)]) >= 1
self.psql_only_test_case()
def psql_only_test_case(self):
result3 = Order.random_sample(self.eng, perc=10)
with orm.Session(self.eng) as ses:
result4 = Order.random_sample(ses, perc=10)
assert result3[0].id != result4[0].id
|
class BulkOperationTestBase(BaseCrudTest):
def method_level_data_teardown(self):
pass
def test_smart_insert(self):
'''
Test performance of smart insert.
**中文文档**
测试smart_insert的基本功能, 以及与普通的insert比较性能。
'''
pass
def test_smart_insert_single_object(self):
pass
def test_smart_update(self):
pass
def test_select_all(self):
pass
def test_random_sample(self):
pass
def psql_only_test_case(self):
pass
| 8 | 1 | 30 | 5 | 21 | 5 | 1 | 0.23 | 1 | 7 | 3 | 2 | 7 | 1 | 7 | 18 | 214 | 41 | 146 | 37 | 138 | 33 | 116 | 28 | 108 | 4 | 1 | 3 | 10 |
148,352 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/orm/test_orm_extended_declarative_base_3_bulk_api.py
|
test_orm_extended_declarative_base_3_bulk_api.TestExtendedBaseOnPostgres
|
class TestExtendedBaseOnPostgres(BulkOperationTestBase): # test on postgres
engine = engine_psql
|
class TestExtendedBaseOnPostgres(BulkOperationTestBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 2 | 0 | 2 | 2 | 1 | 1 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
148,353 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/orm/test_orm_extended_declarative_base_3_bulk_api.py
|
test_orm_extended_declarative_base_3_bulk_api.TestExtendedBaseOnSqlite
|
class TestExtendedBaseOnSqlite(BulkOperationTestBase): # test on sqlite
engine = engine_sqlite
def psql_only_test_case(self):
pass
|
class TestExtendedBaseOnSqlite(BulkOperationTestBase):
def psql_only_test_case(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0.25 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 19 | 5 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
148,354 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/orm/test_orm_extended_declarative_base_4_edge_case.py
|
test_orm_extended_declarative_base_4_edge_case.TestExtendedBaseOnPostgres
|
class TestExtendedBaseOnPostgres(ExtendedBaseEdgeCaseTestBase): # test on postgres
engine = engine_psql
|
class TestExtendedBaseOnPostgres(ExtendedBaseEdgeCaseTestBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 2 | 0 | 2 | 2 | 1 | 1 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
148,355 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/tests/crud_test.py
|
sqlalchemy_mate.tests.crud_test.Order
|
class Order(Base, ExtendedBase):
__tablename__ = "extended_declarative_base_order"
id: orm.Mapped[int] = orm.mapped_column(sa.Integer, primary_key=True)
|
class Order(Base, ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 4 | 1 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 2 | 0 | 0 |
148,356 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/tests/types/test_types_compressed.py
|
test_types_compressed.Url
|
class Url(Base):
__tablename__ = "types_compressed_urls"
url: orm.Mapped[str] = orm.mapped_column(sa.String, primary_key=True)
html: orm.Mapped[str] = orm.mapped_column(CompressedStringType)
|
class Url(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 1 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
148,357 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/docs/source/04-Custom-Types/index.py
|
index.Computer
|
class Computer(Base):
__tablename__ = "computer"
id: orm.Mapped[int] = orm.mapped_column(sa.Integer, primary_key=True)
details: orm.Mapped[ComputerDetails] = orm.mapped_column(
sam.types.JSONSerializableType(factory_class=ComputerDetails),
nullable=True,
)
|
class Computer(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 1 | 7 | 3 | 6 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
148,358 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/docs/source/02-ORM-API/index.py
|
index.User
|
class User(Base, sam.ExtendedBase):
__tablename__ = "users"
id: orm.Mapped[int] = orm.mapped_column(sa.Integer, primary_key=True)
name: orm.Mapped[str] = orm.mapped_column(sa.String, nullable=True)
# you can also do this
# id: int = sa.Column(sa.Integer, primary_key=True)
# name: str = sa.Column(sa.String, nullable=True)
# put important columns here
# you can choose to print those columns only with ``.glance()`` method.
_settings_major_attrs = [
id,
]
|
class User(Base, sam.ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.71 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 14 | 2 | 7 | 4 | 6 | 5 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,359 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/docs/source/03-Other-Helpers/index.py
|
index.User
|
class User(Base, sam.ExtendedBase):
__tablename__ = "users"
id: orm.Mapped[int] = orm.mapped_column(sa.Integer, primary_key=True)
name: orm.Mapped[str] = orm.mapped_column(sa.String, nullable=True)
|
class User(Base, sam.ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 5 | 1 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 2 | 0 | 0 |
148,360 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/docs/source/04-Custom-Types/index.py
|
index.ComputerDetails
|
class ComputerDetails:
def __init__(self, os: str, cpu: int, memory: int, disk: int):
self.os = os
self.cpu = cpu
self.memory = memory
self.disk = disk
def to_json(self) -> str:
return jsonpickle.encode(self)
@classmethod
def from_json(cls, json_str: str) -> "Computer":
return jsonpickle.decode(json_str)
|
class ComputerDetails:
def __init__(self, os: str, cpu: int, memory: int, disk: int):
pass
def to_json(self) -> str:
pass
@classmethod
def from_json(cls, json_str: str) -> "Computer":
pass
| 5 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 0 | 2 | 0 | 0 | 2 | 4 | 3 | 3 | 13 | 2 | 11 | 9 | 6 | 0 | 10 | 8 | 6 | 1 | 0 | 0 | 3 |
148,361 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/examples/e2_orm_api.py
|
e2_orm_api.User
|
class User(Base, sam.ExtendedBase):
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=True)
_settings_major_attrs = [id, ]
|
class User(Base, sam.ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 7 | 2 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,362 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/patterns/large_binary_column/aws_s3.py
|
sqlalchemy_mate.patterns.large_binary_column.aws_s3.PutS3BackedColumnResult
|
class PutS3BackedColumnResult:
"""
The returned object of :func:`put_s3backed_column`.
:param column: which column is about to be created/updated.
:param old_s3_uri: the old S3 URI, if it is a "create", then it is None.
:param new_s3_uri:
:param executed:
:param cleanup_function:
:param cleanup_old_kwargs:
:param cleanup_new_kwargs:
"""
# fmt: off
column: str = dataclasses.field()
old_s3_uri: str = dataclasses.field()
new_s3_uri: str = dataclasses.field()
executed: bool = dataclasses.field()
cleanup_function: T.Callable = dataclasses.field()
cleanup_old_kwargs: T.Optional[T.Dict[str, T.Any]] = dataclasses.field(default=None)
cleanup_new_kwargs: T.Optional[T.Dict[str, T.Any]] = dataclasses.field(default=None)
|
class PutS3BackedColumnResult:
'''
The returned object of :func:`put_s3backed_column`.
:param column: which column is about to be created/updated.
:param old_s3_uri: the old S3 URI, if it is a "create", then it is None.
:param new_s3_uri:
:param executed:
:param cleanup_function:
:param cleanup_old_kwargs:
:param cleanup_new_kwargs:
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.38 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 2 | 8 | 8 | 7 | 11 | 8 | 8 | 7 | 0 | 0 | 0 | 0 |
148,363 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/patterns/large_binary_column/aws_s3.py
|
sqlalchemy_mate.patterns.large_binary_column.aws_s3.PutS3ApiCall
|
class PutS3ApiCall:
"""
A data container of the arguments that will be used in ``s3_client.put_object()``.
:param column: which column is about to be created/updated.
:param binary: the binary data of the column to be written to S3.
:param old_s3_uri: if it is a "create row", then it is None.
if it is a "update row", then it is the old value of the column (could be None).
:param extra_put_object_kwargs: additional custom keyword arguments for
``s3_client.put_object()`` API.
"""
# fmt: off
column: str = dataclasses.field()
binary: bytes = dataclasses.field()
old_s3_uri: T.Optional[str] = dataclasses.field()
extra_put_object_kwargs: T.Optional[T.Dict[str, T.Any]] = dataclasses.field(default_factory=dict)
|
class PutS3ApiCall:
'''
A data container of the arguments that will be used in ``s3_client.put_object()``.
:param column: which column is about to be created/updated.
:param binary: the binary data of the column to be written to S3.
:param old_s3_uri: if it is a "create row", then it is None.
if it is a "update row", then it is the old value of the column (could be None).
:param extra_put_object_kwargs: additional custom keyword arguments for
``s3_client.put_object()`` API.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 2 | 5 | 5 | 4 | 10 | 5 | 5 | 4 | 0 | 0 | 0 | 0 |
148,364 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/examples/e32_pretty_table.py
|
e32_pretty_table.User
|
class User(Base, sam.ExtendedBase):
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=True)
|
class User(Base, sam.ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 5 | 1 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 2 | 0 | 0 |
148,365 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/patterns/p1_alt_table.py
|
p1_alt_table.UserNew
|
class UserNew(Base, ExtendedBase):
__tablename__ = "users_new"
id = sa.Column(sa.Integer, primary_key=True)
username = sa.Column(sa.String)
email = sa.Column(sa.String)
|
class UserNew(Base, ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 1 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 1 | 0 | 0 |
148,366 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/patterns/large_binary_column/local.py
|
sqlalchemy_mate.patterns.large_binary_column.local.WriteFileResult
|
class WriteFileResult:
write_file_backed_column_results: T.List[WriteFileBackedColumnResult] = (
dataclasses.field()
)
def to_values(self) -> T.Dict[str, str]:
return {
res.column: str(res.new_path)
for res in self.write_file_backed_column_results
}
def clean_up_new_file_when_create_or_update_row_failed(self):
for res in self.write_file_backed_column_results:
clean_up_new_file_when_create_or_update_row_failed(
new_path=res.new_path, executed=res.executed
)
def clean_up_old_file_when_update_row_succeeded(self):
for res in self.write_file_backed_column_results:
clean_up_old_file_when_update_row_succeeded(
old_path=res.old_path, executed=res.executed
)
|
class WriteFileResult:
def to_values(self) -> T.Dict[str, str]:
pass
def clean_up_new_file_when_create_or_update_row_failed(self):
pass
def clean_up_old_file_when_update_row_succeeded(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 0 | 1 | 0 | 0 | 3 | 0 | 3 | 3 | 22 | 3 | 19 | 7 | 15 | 0 | 10 | 7 | 6 | 2 | 0 | 1 | 5 |
148,367 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/patterns/p1_alt_table.py
|
p1_alt_table.User
|
class User(Base, ExtendedBase):
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
username = sa.Column(sa.String)
|
class User(Base, ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 1 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
148,368 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/patterns/p1_alt_table.py
|
p1_alt_table.User
|
class User(Base, ExtendedBase):
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
username = sa.Column(sa.String)
|
class User(Base, ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 1 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 1 | 0 | 0 |
148,369 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/engine_creator.py
|
sqlalchemy_mate.engine_creator.EngineCreator
|
class EngineCreator: # pragma: no cover
"""
Tired of looking up docs on https://docs.sqlalchemy.org/en/latest/core/engines.html?
``EngineCreator`` creates sqlalchemy engine in one line:
Example::
from sqlalchemy_mate import EngineCreator
# sqlite in memory
engine = EngineCreator.create_sqlite()
# connect to postgresql, credential stored at ``~/.db.json``
# content of ``.db.json``
{
"mydb": {
"host": "example.com",
"port": 1234,
"database": "test",
"username": "admin",
"password": "admin"
},
...
}
engine = EngineCreator.from_home_db_json("mydb").create_postgresql()
"""
def __init__(
self,
host=None,
port=None,
database=None,
username=None,
password=None,
):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
uri_template = "{username}{has_password}{password}@{host}{has_port}{port}/{database}"
path_db_json = os.path.join(os.path.expanduser("~"), ".db.json")
local_home = os.path.basename(os.path.expanduser("~"))
def __repr__(self):
return "{classname}(host='{host}', port={port}, database='{database}', username={username}, password='xxxxxxxxxxxx')".format(
classname=self.__class__.__name__,
host=self.host, port=self.port,
database=self.database, username=self.username,
)
@property
def uri(self) -> str:
"""
Return sqlalchemy connect string URI.
"""
return self.uri_template.format(
host=self.host,
port="" if self.port is None else self.port,
database=self.database,
username=self.username,
password="" if self.password is None else self.password,
has_password="" if self.password is None else ":",
has_port="" if self.port is None else ":",
)
@classmethod
def _validate_key_mapping(cls, key_mapping):
if key_mapping is not None:
keys = list(key_mapping)
keys.sort()
if keys != ["database", "host", "password", "port", "username"]:
msg = ("`key_mapping` is the credential field mapping from `Credential` to custom json! "
"it has to be a dictionary with 5 keys: "
"host, port, password, port, username!")
raise ValueError(msg)
@classmethod
def _transform(cls, data, key_mapping):
if key_mapping is None:
return data
else:
return {actual: data[custom] for actual, custom in key_mapping.items()}
@classmethod
def _from_json_data(cls, data, json_path=None, key_mapping=None):
if json_path is not None:
for p in json_path.split("."):
data = data[p]
return cls(**cls._transform(data, key_mapping))
@classmethod
def from_json(
cls,
json_file: str,
json_path: str = None,
key_mapping: dict = None,
) -> 'EngineCreator':
"""
Load connection credential from json file.
:param json_file: str, path to json file
:param json_path: str, dot notation of the path to the credential dict.
:param key_mapping: dict, map 'host', 'port', 'database', 'username', 'password'
to custom alias, for example ``{'host': 'h', 'port': 'p', 'database': 'db', 'username': 'user', 'password': 'pwd'}``. This params are used to adapt any json data.
:rtype:
:return:
Example:
Your json file::
{
"credentials": {
"db1": {
"h": "example.com",
"p": 1234,
"db": "test",
"user": "admin",
"pwd": "admin",
},
"db2": {
...
}
}
}
Usage::
cred = Credential.from_json(
"path-to-json-file", "credentials.db1",
dict(host="h", port="p", database="db", username="user", password="pwd")
)
"""
cls._validate_key_mapping(key_mapping)
with open(json_file, "rb") as f:
data = json.loads(f.read().decode("utf-8"))
return cls._from_json_data(data, json_path, key_mapping)
@classmethod
def from_home_db_json(
cls,
identifier: str,
key_mapping: dict = None,
) -> 'EngineCreator': # pragma: no cover
"""
Read credential from $HOME/.db.json file.
:type identifier: str
:param identifier: str, database identifier.
:type key_mapping: Dict[str, str]
:param key_mapping: dict
``.db.json````::
{
"identifier1": {
"host": "example.com",
"port": 1234,
"database": "test",
"username": "admin",
"password": "admin",
},
"identifier2": {
...
}
}
"""
return cls.from_json(
json_file=cls.path_db_json, json_path=identifier, key_mapping=key_mapping)
@classmethod
def from_s3_json(
cls,
bucket_name: str,
key: str,
json_path: str = None,
key_mapping: dict = None,
aws_profile: str = None,
aws_access_key_id: str = None,
aws_secret_access_key: str = None,
region_name: str = None,
) -> 'EngineCreator': # pragma: no cover
"""
Load database credential from json on s3.
:param bucket_name: str
:param key: str
:param aws_profile: if None, assume that you are using this from
AWS cloud. (service on the same cloud doesn't need profile name)
:param aws_access_key_id: str, not recommend to use
:param aws_secret_access_key: str, not recommend to use
:param region_name: str
"""
import boto3
ses = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name,
profile_name=aws_profile,
)
s3 = ses.resource("s3")
bucket = s3.Bucket(bucket_name)
object = bucket.Object(key)
data = json.loads(object.get()["Body"].read().decode("utf-8"))
return cls._from_json_data(data, json_path, key_mapping)
@classmethod
def from_env(
cls,
prefix: str,
kms_decrypt: bool = False,
aws_profile: str = None,
) -> 'EngineCreator':
"""
Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str
"""
if len(prefix) < 1:
raise ValueError("prefix can't be empty")
if len(set(prefix).difference(set(string.ascii_uppercase + "_"))):
raise ValueError("prefix can only use [A-Z] and '_'!")
if not prefix.endswith("_"):
prefix = prefix + "_"
data = dict(
host=os.getenv(prefix + "HOST"),
port=os.getenv(prefix + "PORT"),
database=os.getenv(prefix + "DATABASE"),
username=os.getenv(prefix + "USERNAME"),
password=os.getenv(prefix + "PASSWORD"),
)
if kms_decrypt is True: # pragma: no cover
import boto3
from base64 import b64decode
if aws_profile is not None:
kms = boto3.client("kms")
else:
ses = boto3.Session(profile_name=aws_profile)
kms = ses.client("kms")
def decrypt(kms, text):
return kms.decrypt(
CiphertextBlob=b64decode(text.encode("utf-8"))
)["Plaintext"].decode("utf-8")
data = {
key: value if value is None else decrypt(kms, str(value))
for key, value in data.items()
}
return cls(**data)
def to_dict(self):
"""
Convert credentials into a dict.
"""
return dict(
host=self.host,
port=self.port,
database=self.database,
username=self.username,
password=self.password,
)
# --- engine creator logic
def create_connect_str(self, dialect_and_driver) -> str:
return "{}://{}".format(dialect_and_driver, self.uri)
_ccs = create_connect_str
def create_engine(self, conn_str, **kwargs) -> Engine:
"""
:rtype: Engine
"""
return sa.create_engine(conn_str, **kwargs)
_ce = create_engine
@classmethod
def create_sqlite(cls, path=":memory:", **kwargs):
"""
Create sqlite engine.
"""
return sa.create_engine("sqlite:///{path}".format(path=path), **kwargs)
class DialectAndDriver(object):
"""
DB dialect and DB driver mapping.
"""
psql = "postgresql"
psql_psycopg2 = "postgresql+psycopg2"
psql_pg8000 = "postgresql+pg8000"
psql_pygresql = "postgresql+pygresql"
psql_psycopg2cffi = "postgresql+psycopg2cffi"
psql_pypostgresql = "postgresql+pypostgresql"
mysql = "mysql"
mysql_mysqldb = "mysql+mysqldb"
mysql_mysqlconnector = "mysql+mysqlconnector"
mysql_oursql = "mysql+oursql"
mysql_pymysql = "mysql+pymysql"
mysql_cymysql = "mysql+cymysql"
oracle = "oracle"
oracle_cx_oracle = "oracle+cx_oracle"
mssql_pyodbc = "mssql+pyodbc"
mssql_pymssql = "mssql+pymssql"
redshift_psycopg2 = "redshift+psycopg2"
# postgresql
def create_postgresql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.psql), **kwargs
)
def create_postgresql_psycopg2(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.psql_psycopg2), **kwargs
)
def create_postgresql_pg8000(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.psql_pg8000), **kwargs
)
def _create_postgresql_pygresql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.psql_pygresql), **kwargs
)
def create_postgresql_psycopg2cffi(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.psql_psycopg2cffi), **kwargs
)
def create_postgresql_pypostgresql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.psql_pypostgresql), **kwargs
)
# mysql
def create_mysql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql), **kwargs
)
def create_mysql_mysqldb(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql_mysqldb), **kwargs
)
def create_mysql_mysqlconnector(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql_mysqlconnector), **kwargs
)
def create_mysql_oursql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql_oursql), **kwargs
)
def create_mysql_pymysql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql_pymysql), **kwargs
)
def create_mysql_cymysql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql_cymysql), **kwargs
)
# oracle
def create_oracle(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.oracle), **kwargs
)
def create_oracle_cx_oracle(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.oracle_cx_oracle), **kwargs
)
# mssql
def create_mssql_pyodbc(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mssql_pyodbc), **kwargs
)
def create_mssql_pymssql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mssql_pymssql), **kwargs
)
# redshift
def create_redshift(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.redshift_psycopg2), **kwargs
)
|
class EngineCreator:
'''
Tired of looking up docs on https://docs.sqlalchemy.org/en/latest/core/engines.html?
``EngineCreator`` creates sqlalchemy engine in one line:
Example::
from sqlalchemy_mate import EngineCreator
# sqlite in memory
engine = EngineCreator.create_sqlite()
# connect to postgresql, credential stored at ``~/.db.json``
# content of ``.db.json``
{
"mydb": {
"host": "example.com",
"port": 1234,
"database": "test",
"username": "admin",
"password": "admin"
},
...
}
engine = EngineCreator.from_home_db_json("mydb").create_postgresql()
'''
def __init__(
self,
host=None,
port=None,
database=None,
username=None,
password=None,
):
pass
def __repr__(self):
pass
@property
def uri(self) -> str:
'''
Return sqlalchemy connect string URI.
'''
pass
@classmethod
def _validate_key_mapping(cls, key_mapping):
pass
@classmethod
def _transform(cls, data, key_mapping):
pass
@classmethod
def _from_json_data(cls, data, json_path=None, key_mapping=None):
pass
@classmethod
def from_json(
cls,
json_file: str,
json_path: str = None,
key_mapping: dict = None,
) -> 'EngineCreator':
'''
Load connection credential from json file.
:param json_file: str, path to json file
:param json_path: str, dot notation of the path to the credential dict.
:param key_mapping: dict, map 'host', 'port', 'database', 'username', 'password'
to custom alias, for example ``{'host': 'h', 'port': 'p', 'database': 'db', 'username': 'user', 'password': 'pwd'}``. This params are used to adapt any json data.
:rtype:
:return:
Example:
Your json file::
{
"credentials": {
"db1": {
"h": "example.com",
"p": 1234,
"db": "test",
"user": "admin",
"pwd": "admin",
},
"db2": {
...
}
}
}
Usage::
cred = Credential.from_json(
"path-to-json-file", "credentials.db1",
dict(host="h", port="p", database="db", username="user", password="pwd")
)
'''
pass
@classmethod
def from_home_db_json(
cls,
identifier: str,
key_mapping: dict = None,
) -> 'EngineCreator':
'''
Read credential from $HOME/.db.json file.
:type identifier: str
:param identifier: str, database identifier.
:type key_mapping: Dict[str, str]
:param key_mapping: dict
``.db.json````::
{
"identifier1": {
"host": "example.com",
"port": 1234,
"database": "test",
"username": "admin",
"password": "admin",
},
"identifier2": {
...
}
}
'''
pass
@classmethod
def from_s3_json(
cls,
bucket_name: str,
key: str,
json_path: str = None,
key_mapping: dict = None,
aws_profile: str = None,
aws_access_key_id: str = None,
aws_secret_access_key: str = None,
region_name: str = None,
) -> 'EngineCreator':
'''
Load database credential from json on s3.
:param bucket_name: str
:param key: str
:param aws_profile: if None, assume that you are using this from
AWS cloud. (service on the same cloud doesn't need profile name)
:param aws_access_key_id: str, not recommend to use
:param aws_secret_access_key: str, not recommend to use
:param region_name: str
'''
pass
@classmethod
def from_env(
cls,
prefix: str,
kms_decrypt: bool = False,
aws_profile: str = None,
) -> 'EngineCreator':
'''
Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str
'''
pass
def decrypt(kms, text):
pass
def to_dict(self):
'''
Convert credentials into a dict.
'''
pass
def create_connect_str(self, dialect_and_driver) -> str:
pass
def create_engine(self, conn_str, **kwargs) -> Engine:
'''
:rtype: Engine
'''
pass
@classmethod
def create_sqlite(cls, path=":memory:", **kwargs):
'''
Create sqlite engine.
'''
pass
class DialectAndDriver(object):
'''
DB dialect and DB driver mapping.
'''
def create_postgresql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_postgresql_psycopg2(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_postgresql_pg8000(self, **kwargs):
'''
:rtype: Engine
'''
pass
def _create_postgresql_pygresql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_postgresql_psycopg2cffi(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_postgresql_pypostgresql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mysql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mysql_mysqldb(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mysql_mysqlconnector(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mysql_oursql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mysql_pymysql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mysql_cymysql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_oracle(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_oracle_cx_oracle(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mssql_pyodbc(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_mssql_pymssql(self, **kwargs):
'''
:rtype: Engine
'''
pass
def create_redshift(self, **kwargs):
'''
:rtype: Engine
'''
pass
| 43 | 27 | 11 | 1 | 7 | 4 | 1 | 0.7 | 0 | 7 | 1 | 0 | 23 | 5 | 31 | 31 | 465 | 62 | 239 | 117 | 162 | 168 | 126 | 76 | 89 | 7 | 0 | 2 | 47 |
148,370 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/patterns/large_binary_column/local.py
|
sqlalchemy_mate.patterns.large_binary_column.local.WriteFileBackedColumnResult
|
class WriteFileBackedColumnResult:
    """
    Per-column result record for writing a large-binary value to a file.
    """
    # fmt: off
    column: str = dataclasses.field()                    # name of the table column
    old_path: Path = dataclasses.field()                 # file location before the write, if any
    new_path: Path = dataclasses.field()                 # file location produced by this write
    executed: bool = dataclasses.field()                 # whether the write actually happened
    cleanup_function: T.Callable = dataclasses.field()   # callable invoked to remove files on cleanup
    cleanup_old_kwargs: T.Optional[T.Dict[str, T.Any]] = dataclasses.field(default=None)  # kwargs for cleaning up the old file
    cleanup_new_kwargs: T.Optional[T.Dict[str, T.Any]] = dataclasses.field(default=None)  # kwargs for cleaning up the new file
    # fmt: on
|
class WriteFileBackedColumnResult:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.13 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0 | 8 | 8 | 7 | 1 | 8 | 8 | 7 | 0 | 0 | 0 | 0 |
148,371 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/patterns/status_tracker/impl.py
|
sqlalchemy_mate.patterns.status_tracker.impl.JobAlreadySucceededError
|
class JobAlreadySucceededError(JobIsNotReadyToStartError):
    """
    Raised when trying to start a job that has already succeeded.

    Fix: the previous docstring said "succeeded (failed too many times)" —
    the parenthetical belongs to the retry-exhausted error, not this one.
    """
|
class JobAlreadySucceededError(JobIsNotReadyToStartError):
'''
Raised when try to start a succeeded (failed too many times) job.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 6 | 1 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
148,372 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/patterns/large_binary_column/local.py
|
sqlalchemy_mate.patterns.large_binary_column.local.WriteFileApiCall
|
class WriteFileApiCall:
    """
    Parameters describing one column-write call.
    """
    # fmt: off
    column: str = dataclasses.field()                    # name of the table column
    binary: bytes = dataclasses.field()                  # raw bytes to persist
    old_path: T.Optional[Path] = dataclasses.field()     # previous file path, or None on first write
    extra_write_kwargs: T.Optional[T.Dict[str, T.Any]] = dataclasses.field(default_factory=dict)  # forwarded to the underlying write call
    # fmt: on
|
class WriteFileApiCall:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 5 | 5 | 4 | 1 | 5 | 5 | 4 | 0 | 0 | 0 | 0 |
148,373 |
MacHu-GWU/sqlalchemy_mate-project
|
MacHu-GWU_sqlalchemy_mate-project/sqlalchemy_mate/patterns/large_binary_column/aws_s3.py
|
sqlalchemy_mate.patterns.large_binary_column.aws_s3.PutS3Result
|
class PutS3Result:
    """
    The returned object of :func:`put_s3_result`.
    """

    s3_client: "S3Client" = dataclasses.field()
    put_s3backed_column_results: T.List[PutS3BackedColumnResult] = dataclasses.field()

    def to_values(self) -> T.Dict[str, str]:
        """
        Return a dictionary of column name and S3 uri that can be used in the
        SQL ``UPDATE ... VALUES ...`` statement. The key is the column name,
        and the value is the S3 URI.
        """
        mapping = {}
        for result in self.put_s3backed_column_results:
            mapping[result.column] = result.new_s3_uri
        return mapping

    def clean_up_created_s3_object_when_create_or_update_row_failed(self):
        """
        A wrapper of :func:`clean_up_created_s3_object_when_create_or_update_row_failed`.
        """
        # Only objects that were actually written need to be removed.
        uris = [
            result.new_s3_uri
            for result in self.put_s3backed_column_results
            if result.executed
        ]
        batch_delete_s3_objects(s3_client=self.s3_client, s3_uri_list=uris)

    def clean_up_old_s3_object_when_update_row_succeeded(self):
        """
        A wrapper of :func:`clean_up_old_s3_object_when_update_row_succeeded`.
        """
        # Delete superseded objects; skip entries with no previous S3 uri.
        uris = [
            result.old_s3_uri
            for result in self.put_s3backed_column_results
            if result.executed and result.old_s3_uri
        ]
        batch_delete_s3_objects(s3_client=self.s3_client, s3_uri_list=uris)
|
class PutS3Result:
'''
The returned object of :func:`put_s3_result`.
'''
def to_values(self) -> T.Dict[str, str]:
'''
Return a dictionary of column name and S3 uri that can be used in the
SQL ``UPDATE ... VALUES ...`` statement. The key is the column name,
and the value is the S3 URI.
'''
pass
def clean_up_created_s3_object_when_create_or_update_row_failed(self):
'''
A wrapper of :func:`clean_up_created_s3_object_when_create_or_update_row_failed`.
'''
pass
def clean_up_old_s3_object_when_update_row_succeeded(self):
'''
A wrapper of :func:`clean_up_old_s3_object_when_update_row_succeeded`.
'''
pass
| 4 | 4 | 9 | 0 | 5 | 4 | 3 | 0.78 | 0 | 2 | 0 | 0 | 3 | 0 | 3 | 3 | 36 | 4 | 18 | 10 | 14 | 14 | 18 | 10 | 14 | 4 | 0 | 3 | 8 |
148,374 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/superjson/_superjson.py
|
superjson._superjson.Meta
|
class Meta(type):
    """
    Metaclass that registers converter methods at class-creation time.

    It walks the new class's MRO and collects every attribute whose name
    starts with ``dump_`` / ``load_`` into the class-level ``_dumpers`` /
    ``_loaders`` dicts, keyed by the full class name the method handles.
    Members that look like converters but fail validation trigger a warning.
    """
    def __new__(cls, name, bases, attrs):
        klass = super(Meta, cls).__new__(cls, name, bases, attrs)
        _dumpers = dict()
        _loaders = dict()
        # scan the whole MRO so converters inherited from bases are found too
        for base in inspect.getmro(klass):
            for attr, value in base.__dict__.items():
                dumper_warning_message = WARN_MSG.format(
                    attr=attr,
                    method_type="dumper",
                    obj_or_dct="obj",
                    dump_or_load="dump",
                )
                loader_warning_message = WARN_MSG.format(
                    attr=attr,
                    method_type="loader",
                    obj_or_dct="dct",
                    dump_or_load="load",
                )
                # link dumper / loader method with the full classname
                # find dumper method,
                if attr.startswith("dump_"):
                    try:
                        if is_dumper_method(value):
                            class_name = get_class_name_from_dumper_loader_method(
                                value)
                            _dumpers[class_name] = value
                        else:
                            logger.warning(dumper_warning_message)
                    # NOTE(review): presumably raised when `value` is not a
                    # callable/inspectable member — confirm in is_dumper_method
                    except TypeError:
                        logger.warning(dumper_warning_message)
                # find loader method
                if attr.startswith("load_"):
                    try:
                        if is_loader_method(value):
                            class_name = get_class_name_from_dumper_loader_method(
                                value)
                            _loaders[class_name] = value
                        else:
                            logger.warning(loader_warning_message)
                    except TypeError:
                        logger.warning(loader_warning_message)
        klass._dumpers = _dumpers
        klass._loaders = _loaders
        return klass
|
class Meta(type):
def __new__(cls, name, bases, attrs):
pass
| 2 | 0 | 50 | 6 | 41 | 3 | 9 | 0.07 | 1 | 3 | 0 | 1 | 1 | 0 | 1 | 14 | 51 | 6 | 42 | 10 | 40 | 3 | 28 | 10 | 26 | 9 | 2 | 5 | 9 |
148,375 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/superjson/_superjson.py
|
superjson._superjson.SuperJson
|
class SuperJson(BaseSuperJson):
    """
    The default, ready-to-use SuperJson — no extra converters beyond the base.
    """
|
class SuperJson(BaseSuperJson):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 | 0 | 0 | 0 | 34 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,376 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/tests/test_extend.py
|
test_extend.MySuperJson
|
class MySuperJson(SuperJson):
    """
    SuperJson subclass used by the tests: registers dumper/loader methods for
    :class:`User`, plus several deliberately malformed members that the
    metaclass is expected to warn about (per the inline comments).
    """
    def dump_User(self, obj, class_name=User_class_name):
        key = "$" + class_name
        return {key: {"id": obj.id, "name": obj.name}}

    def load_User(self, dct, class_name=User_class_name):
        key = "$" + class_name
        return User(**dct[key])

    # pytest will change the module from __main__ to test_extend, implement both case
    def dump_test_extend_User(self, obj, class_name="test_extend.User"):
        key = "$" + class_name
        return {key: {"id": obj.id, "name": obj.name}}

    def load_test_extend_User(self, dct, class_name="test_extend.User"):
        key = "$" + class_name
        return User(**dct[key])

    # other method
    def dump_someting(self):  # will prompt warning
        pass

    def load_something(self):  # will prompt warning
        pass

    dump_this = None  # will prompt warning
    load_this = None
|
class MySuperJson(SuperJson):
def dump_User(self, obj, class_name=User_class_name):
pass
def load_User(self, dct, class_name=User_class_name):
pass
def dump_test_extend_User(self, obj, class_name="test_extend.User"):
pass
def load_test_extend_User(self, dct, class_name="test_extend.User"):
pass
def dump_someting(self):
pass
def load_something(self):
pass
| 7 | 0 | 3 | 0 | 3 | 0 | 1 | 0.32 | 1 | 1 | 1 | 0 | 6 | 0 | 6 | 40 | 27 | 6 | 19 | 13 | 12 | 6 | 19 | 13 | 12 | 1 | 5 | 0 | 6 |
148,377 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/tests/test_extend.py
|
test_extend.TestMySuperJson
|
class TestMySuperJson(object):
    """Round-trip test for the custom ``User`` converter."""

    def test_dumps_loads(self):
        original = {
            "int": 1,
            "str": "Hello",
            "user": User(id=1, name="Alice"),
        }
        encoded = json.dumps(original, pretty=True)
        decoded = json.loads(encoded)
        assert original == decoded
|
class TestMySuperJson(object):
def test_dumps_loads(self):
pass
| 2 | 0 | 9 | 0 | 9 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 10 | 0 | 10 | 5 | 8 | 0 | 6 | 5 | 4 | 1 | 1 | 0 | 1 |
148,378 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/for_document.py
|
for_document.User
|
class User(object):
    """Simple value object used in the documentation examples."""

    def __init__(self, id, name):
        self.id = id
        self.name = name

    def __repr__(self):
        return "User(id=%r, name=%r)" % (self.id, self.name)

    def __eq__(self, other):
        # compare as tuples — equivalent to field-by-field `and`
        return (self.id, self.name) == (other.id, other.name)
|
class User(object):
def __init__(self, id, name):
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 10 | 2 | 8 | 6 | 4 | 0 | 8 | 6 | 4 | 1 | 1 | 0 | 3 |
148,379 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/tests/test_superjson.py
|
test_superjson.TestSuperjson
|
class TestSuperjson(object):
    """
    Tests for file-based ``dump`` / ``load``, regular and gzip-compressed.
    """
    def test_pretty(self):
        # overwrite=False first: the target should not exist yet
        json.dump(data, abspath_of("data1.json"), pretty=True,
                  overwrite=False, verbose=False)
        data1 = json.load(abspath_of("data1.json"), verbose=False)
        assert data == data1
        json.dump(data, abspath_of("data2.json"), pretty=True,
                  overwrite=True, verbose=False)
        data2 = json.load(abspath_of("data2.json"), verbose=False)
        assert data == data2

    def test_auto_compress(self):
        # a ``.gz`` extension triggers transparent compression on dump/load
        json.dump(data, abspath_of("data1.gz"), pretty=True, verbose=False)
        data1 = json.load(abspath_of("data1.gz"), verbose=False)
        assert data == data1

    def test_overwrite(self):
        # second dump silently replaces the first because overwrite=True
        json.dump(data, abspath_of("test.json"), overwrite=True, verbose=False)
        json.dump("Hello World!", abspath_of("test.json"), overwrite=True, verbose=False)
        # I don't know why in pytest it doesn't work
        s = json.load(abspath_of("test.json"), verbose=False)
        assert s == "Hello World!"

    def test_load_from_not_exist_file(self):
        with raises(EnvironmentError):
            json.load(abspath_of("not-exists.json"), verbose=False)
|
class TestSuperjson(object):
def test_pretty(self):
pass
def test_auto_compress(self):
pass
def test_overwrite(self):
pass
def test_load_from_not_exist_file(self):
pass
| 5 | 0 | 6 | 0 | 5 | 0 | 1 | 0.05 | 1 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 27 | 4 | 22 | 9 | 17 | 1 | 20 | 9 | 15 | 1 | 1 | 1 | 4 |
148,380 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/for_document.py
|
for_document.MySuperJson
|
class MySuperJson(SuperJson):
    """SuperJson subclass from the docs: custom dumper/loader for ``User``."""

    def dump_User(self, obj, class_name=user_class_name):
        key = "$" + class_name
        return {key: {"id": obj.id, "name": obj.name}}

    def load_User(self, dct, class_name=user_class_name):
        key = "$" + class_name
        return User(**dct[key])
|
class MySuperJson(SuperJson):
def dump_User(self, obj, class_name=user_class_name):
pass
def load_User(self, dct, class_name=user_class_name):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 0 | 2 | 36 | 6 | 1 | 5 | 3 | 2 | 0 | 5 | 3 | 2 | 1 | 5 | 0 | 2 |
148,381 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/tests/test_superjson.py
|
test_superjson.Test_dumps_loads
|
class Test_dumps_loads(object):
    """Tests for in-memory ``dumps`` / ``loads`` round trips."""

    def test_deal_with_bytes(self):
        raw = "Hello".encode("utf-8")
        encoded = json.dumps(raw)
        assert "{" in encoded
        assert "}" in encoded
        decoded = json.loads(encoded, ignore_comments=True)
        assert raw == decoded

    def test_dumps_pretty(self):
        encoded = json.dumps(data, pretty=True)
        assert " " in encoded
        assert "\n" in encoded
        decoded = json.loads(encoded)
        assert data == decoded

    def test_float_precision(self):
        value = 3.1415926535
        encoded = json.dumps(value, float_precision=2)
        assert "3.1415" not in encoded
        assert "3.14" in encoded

    def test_ensure_ascii(self):
        value = ["α", "β", "θ"]
        encoded = json.dumps(value)
        assert "α" not in encoded
        assert json.loads(encoded) == value
        encoded = json.dumps(value, ensure_ascii=False)
        assert "α" in encoded
        assert json.loads(encoded) == value

    def test_compress(self):
        value = list(range(1000))
        plain = json.dumps(value, compress=False)
        packed = json.dumps(value, compress=True)
        assert len(plain) > len(packed)
        assert json.loads(packed, decompress=True) == value
|
class Test_dumps_loads(object):
def test_deal_with_bytes(self):
pass
def test_dumps_pretty(self):
pass
def test_float_precision(self):
pass
def test_ensure_ascii(self):
pass
def test_compress(self):
pass
| 6 | 0 | 7 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 5 | 0 | 5 | 5 | 39 | 6 | 33 | 18 | 27 | 0 | 33 | 18 | 27 | 1 | 1 | 0 | 5 |
148,382 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/superjson/pkg/atomicwrites.py
|
superjson.pkg.atomicwrites.AtomicWriter
|
class AtomicWriter(object):
    """
    A helper class for performing atomic writes. Usage::
        with AtomicWriter(path).open() as f:
            f.write(...)
    :param path: The destination filepath. May or may not exist.
    :param mode: The filemode for the temporary file. This defaults to `wb` in
        Python 2 and `w` in Python 3.
    :param overwrite: If set to false, an error is raised if ``path`` exists.
        Errors are only raised after the file has been written to. Either way,
        the operation is atomic.
    :param open_kwargs: Keyword-arguments to pass to the underlying
        :py:func:`open` call. This can be used to set the encoding when opening
        files in text-mode.
    If you need further control over the exact behavior, you are encouraged to
    subclass.
    """

    def __init__(self, path, mode=DEFAULT_MODE, overwrite=False,
                 **open_kwargs):
        if 'a' in mode:
            raise ValueError(
                'Appending to an existing file is not supported, because that '
                'would involve an expensive `copy`-operation to a temporary '
                'file. Open the file in normal `w`-mode and copy explicitly '
                'if that\'s what you\'re after.'
            )
        if 'x' in mode:
            raise ValueError('Use the `overwrite`-parameter instead.')
        if 'w' not in mode:
            raise ValueError('AtomicWriters can only be written to.')

        # Attempt to convert `path` to `str` or `bytes`
        if fspath is not None:
            path = fspath(path)

        self._path = path
        self._mode = mode
        self._overwrite = overwrite
        self._open_kwargs = open_kwargs

    def open(self):
        """
        Open the temporary file.
        """
        return self._open(self.get_fileobject)

    @contextlib.contextmanager
    def _open(self, get_fileobject):
        # Write to a temp file; commit (rename into place) only on success,
        # otherwise roll back by deleting the temp file.
        f = None  # make sure f exists even if get_fileobject() fails
        try:
            success = False
            with get_fileobject(**self._open_kwargs) as f:
                yield f
                self.sync(f)
            self.commit(f)
            success = True
        finally:
            if not success:
                try:
                    self.rollback(f)
                except Exception:
                    # best-effort cleanup — don't mask the original error
                    pass

    def get_fileobject(self, suffix="", prefix=tempfile.gettempprefix(),
                       dir=None, **kwargs):
        """Return the temporary file to use."""
        if dir is None:
            # default to the target's directory so the later rename stays
            # on one filesystem (rename is only atomic within a filesystem)
            dir = os.path.normpath(os.path.dirname(self._path))
        descriptor, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                            dir=dir)
        # io.open() will take either the descriptor or the name, but we need
        # the name later for commit()/replace_atomic() and couldn't find a way
        # to get the filename from the descriptor.
        os.close(descriptor)
        kwargs['mode'] = self._mode
        kwargs['file'] = name
        return io.open(**kwargs)

    def sync(self, f):
        """responsible for clearing as many file caches as possible before
        commit"""
        f.flush()
        _proper_fsync(f.fileno())

    def commit(self, f):
        """Move the temporary file to the target location."""
        if self._overwrite:
            replace_atomic(f.name, self._path)
        else:
            move_atomic(f.name, self._path)

    def rollback(self, f):
        """Clean up all temporary resources."""
        os.unlink(f.name)
|
class AtomicWriter(object):
'''
A helper class for performing atomic writes. Usage::
with AtomicWriter(path).open() as f:
f.write(...)
:param path: The destination filepath. May or may not exist.
:param mode: The filemode for the temporary file. This defaults to `wb` in
Python 2 and `w` in Python 3.
:param overwrite: If set to false, an error is raised if ``path`` exists.
Errors are only raised after the file has been written to. Either way,
the operation is atomic.
:param open_kwargs: Keyword-arguments to pass to the underlying
:py:func:`open` call. This can be used to set the encoding when opening
files in text-mode.
If you need further control over the exact behavior, you are encouraged to
subclass.
'''
def __init__(self, path, mode=DEFAULT_MODE, overwrite=False,
**open_kwargs):
pass
def open(self):
'''
Open the temporary file.
'''
pass
@contextlib.contextmanager
def _open(self, get_fileobject):
pass
def get_fileobject(self, suffix="", prefix=tempfile.gettempprefix(),
dir=None, **kwargs):
'''Return the temporary file to use.'''
pass
def sync(self, f):
'''responsible for clearing as many file caches as possible before
commit'''
pass
def commit(self, f):
'''Move the temporary file to the target location.'''
pass
def rollback(self, f):
'''Clean up all temporary resources.'''
pass
| 9 | 6 | 10 | 0 | 8 | 2 | 2 | 0.5 | 1 | 2 | 0 | 0 | 7 | 4 | 7 | 7 | 95 | 9 | 58 | 18 | 47 | 29 | 47 | 15 | 39 | 5 | 1 | 3 | 15 |
148,383 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/tests/test_extend.py
|
test_extend.User
|
class User(object):
    """Value object under test; supports equality and ``repr``."""

    def __init__(self, id=None, name=None):
        self.id = id
        self.name = name

    def __repr__(self):
        return "User(id=%r, name=%r)" % (self.id, self.name)

    def __eq__(self, other):
        # compare as tuples — equivalent to field-by-field `and`
        return (self.id, self.name) == (other.id, other.name)
|
class User(object):
def __init__(self, id=None, name=None):
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 10 | 2 | 8 | 6 | 4 | 0 | 8 | 6 | 4 | 1 | 1 | 0 | 3 |
148,384 |
MacHu-GWU/superjson-project
|
MacHu-GWU_superjson-project/superjson/_superjson.py
|
superjson._superjson.BaseSuperJson
|
class BaseSuperJson(metaclass=Meta):
    """
    An extensible json encoder/decoder. You can easily custom converter for
    any types.
    """
    # populated by the ``Meta`` metaclass from dump_* / load_* methods
    _dumpers = dict()
    _loaders = dict()

    def _dump(self, obj):
        """Dump single object to json serializable value.
        """
        class_name = get_class_name(obj)
        if class_name in self._dumpers:
            return self._dumpers[class_name](self, obj)
        raise TypeError("%r is not JSON serializable" % obj)

    def _json_convert(self, obj):
        """Recursive helper method that converts dict types to standard library
        json serializable types, so they can be converted into json.
        """
        # OrderedDict: try the registered dumper first, fall back to plain dict
        if isinstance(obj, OrderedDict):
            try:
                return self._dump(obj)
            except TypeError:
                return {k: self._json_convert(v) for k, v in obj.items()}
        # nested dict
        elif isinstance(obj, dict):
            return {k: self._json_convert(v) for k, v in obj.items()}
        # list or tuple
        elif isinstance(obj, (list, tuple)):
            return list((self._json_convert(v) for v in obj))
        # float
        elif isinstance(obj, float):
            return float(json.encoder.FLOAT_REPR(obj))
        # single object: custom dumper if registered, otherwise pass through
        try:
            return self._dump(obj)
        except TypeError:
            return obj

    def _object_hook1(self, dct):
        """A function can convert dict data into object.
        it's an O(1) implementation.
        """
        # {"$class_name": obj_data}
        if len(dct) == 1:
            for key, value in dct.items():
                class_name = key[1:]
                if class_name in self._loaders:
                    return self._loaders[class_name](self, dct)
                return dct
        return dct

    def _object_hook2(self, dct):  # pragma: no cover
        """Another object hook implementation.
        it's an O(N) implementation.
        """
        for class_name, loader in self._loaders.items():
            if ("$" + class_name) in dct:
                return loader(self, dct)
        return dct

    def dumps(
        self,
        obj,
        indent: bool = None,
        sort_keys: bool = None,
        pretty: bool = False,
        float_precision: int = None,
        ensure_ascii: bool = True,
        compress: bool = False,
        **kwargs
    ):
        """Dump any object into json string.
        :param pretty: if ``True``, dump json into pretty indent and sorted key
            format.
        :type pretty: bool
        :param float_precision: default ``None``, limit floats to
            N-decimal points.
        :type float_precision: int
        :param compress: default ``False``. If True, then compress encoded string.
        :type compress: bool
        """
        if pretty:
            indent = 4
            sort_keys = True
        # NOTE(review): json.encoder.FLOAT_REPR only affects the pure-Python
        # encoder; the C-accelerated encoder may ignore it — confirm.
        if float_precision is None:
            json.encoder.FLOAT_REPR = repr
        else:
            json.encoder.FLOAT_REPR = lambda x: format(
                x, ".%sf" % float_precision)
        s = json.dumps(
            self._json_convert(obj),
            indent=indent,
            sort_keys=sort_keys,
            ensure_ascii=ensure_ascii,
            **kwargs
        )
        if compress:
            s = compresslib.compress(s, return_type="str")
        return s

    def loads(
        self,
        s: str,
        object_hook: bool = None,
        decompress: bool = False,
        ignore_comments: bool = False,
        **kwargs,
    ):
        """load object from json encoded string.
        :param decompress: default ``False``. If True, then decompress string.
        :type decompress: bool
        :param ignore_comments: default ``False``. If True, then ignore comments.
        :type ignore_comments: bool
        """
        if decompress:
            s = compresslib.decompress(s, return_type="str")
        if ignore_comments:
            s = strip_comments(s)
        if object_hook is None:
            object_hook = self._object_hook1
        # object_pairs_hook would take precedence over object_hook — drop it
        if "object_pairs_hook" in kwargs:
            del kwargs["object_pairs_hook"]
        obj = json.loads(
            s,
            object_hook=object_hook,
            object_pairs_hook=None,
            **kwargs
        )
        return obj

    def dump(
        self,
        obj,
        abspath: str,
        indent: bool = None,
        sort_keys: bool = None,
        pretty: bool = False,
        float_precision: int = None,
        ensure_ascii: bool = True,
        overwrite: bool = False,
        verbose: bool = True,
        **kwargs
    ):
        """Dump any object into file.
        :param abspath: if ``*.json, *.js`` then do regular dump. if ``*.gz``,
            then perform compression.
        :type abspath: str
        :param pretty: if True, dump json into pretty indent and sorted key
            format.
        :type pretty: bool
        :param float_precision: default ``None``, limit floats to
            N-decimal points.
        :type float_precision: int
        :param overwrite: default ``False``, If ``True``, when you dump to
            existing file, it silently overwrite it. If ``False``, an alert
            message is shown. Default setting ``False`` is to prevent overwrite
            file by mistake.
        :type overwrite: boolean
        :param verbose: default True, help-message-display trigger.
        :type verbose: boolean
        """
        prt_console("\nDump to '%s' ..." % abspath, verbose)
        is_compressed = is_compressed_json_file(abspath)
        if not overwrite:
            if os.path.exists(abspath):  # pragma: no cover
                prt_console(
                    "    Stop! File exists and overwrite is not allowed",
                    verbose,
                )
                return

        st = time.process_time()
        s = self.dumps(
            obj,
            indent=indent,
            sort_keys=sort_keys,
            pretty=pretty,
            float_precision=float_precision,
            ensure_ascii=ensure_ascii,
            compress=False,  # use uncompressed string, and directly write to file
            **kwargs,
        )
        with atomic_write(abspath, mode="wb", overwrite=True) as f:
            if is_compressed:
                f.write(compresslib.compress(s, return_type="bytes"))
            else:
                f.write(s.encode("utf-8"))
        prt_console(
            "    Complete! Elapse %.6f sec." % (time.process_time() - st),
            verbose,
        )
        return s

    def load(
        self,
        abspath: str,
        object_hook=None,
        ignore_comments: bool = False,
        verbose: bool = True,
        **kwargs
    ):
        """load object from json file.
        :param abspath: if ``*.json, *.js`` then do regular dump. if ``*.gz``,
            then perform decompression.
        :type abspath: str
        :param ignore_comments: default ``False``. If True, then ignore comments.
        :type ignore_comments: bool
        :param verbose: default True, help-message-display trigger.
        :type verbose: boolean
        """
        prt_console("\nLoad from '%s' ..." % abspath, verbose)
        is_compressed = is_compressed_json_file(abspath)
        if not os.path.exists(abspath):
            raise EnvironmentError("'%s' doesn't exist." % abspath)

        st = time.process_time()
        with open(abspath, "rb") as f:
            if is_compressed:
                s = compresslib.decompress(f.read(), return_type="str")
            else:
                s = f.read().decode("utf-8")
        obj = self.loads(
            s,
            object_hook=object_hook,
            decompress=False,
            ignore_comments=ignore_comments,
            **kwargs,
        )
        prt_console("    Complete! Elapse %.6f sec." % (time.process_time() - st),
                    verbose)
        return obj

    # ----------------------------------------------------------------------
    # Support built in data type
    # ----------------------------------------------------------------------
    def dump_bytes(self, obj, class_name=bytes_class_name):
        """
        ``bytes`` dumper.
        """
        return {"$" + class_name: b64encode(obj).decode()}

    def load_bytes(self, dct, class_name=bytes_class_name):
        """
        ``bytes`` loader.
        """
        return b64decode(dct["$" + class_name].encode())

    def dump_datetime(self, obj, class_name="datetime.datetime"):
        """
        ``datetime.datetime`` dumper.
        """
        return {"$" + class_name: obj.isoformat()}

    def load_datetime(self, dct, class_name="datetime.datetime"):
        """
        ``datetime.datetime`` loader.
        """
        try:
            from dateutil.parser import parse
        except ImportError:  # pragma: no cover
            msg = ("You need to install `python-dateutil` to support load/dump for datetime type")
            logger.info(msg)
            raise
        return parse(dct["$" + class_name])

    def dump_date(self, obj, class_name="datetime.date"):
        """
        ``datetime.date`` dumper.
        """
        return {"$" + class_name: str(obj)}

    def load_date(self, dct, class_name="datetime.date"):
        """
        ``datetime.date`` loader.
        """
        return datetime.strptime(dct["$" + class_name], "%Y-%m-%d").date()

    def dump_set(self, obj, class_name=set_class_name):
        """
        ``set`` dumper.
        """
        return {"$" + class_name: [self._json_convert(item) for item in obj]}

    def load_set(self, dct, class_name=set_class_name):
        """
        ``set`` loader.
        """
        return set(dct["$" + class_name])

    def dump_deque(self, obj, class_name="collections.deque"):
        """
        ``collections.deque`` dumper.
        """
        return {"$" + class_name: [self._json_convert(item) for item in obj]}

    def load_deque(self, dct, class_name="collections.deque"):
        """
        ``collections.deque`` loader.
        """
        return deque(dct["$" + class_name])

    def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"):
        """
        ``collections.OrderedDict`` dumper.
        """
        return {
            "$" + class_name: [
                (key, self._json_convert(value)) for key, value in obj.items()
            ]
        }

    def load_OrderedDict(self, dct, class_name="collections.OrderedDict"):
        """
        ``collections.OrderedDict`` loader.
        """
        return OrderedDict(dct["$" + class_name])
|
class BaseSuperJson(metaclass=Meta):
'''
A extensable json encoder/decoder. You can easily custom converter for
any types.
'''
def _dump(self, obj):
'''Dump single object to json serializable value.
'''
pass
def _json_convert(self, obj):
'''Recursive helper method that converts dict types to standard library
json serializable types, so they can be converted into json.
'''
pass
def _object_hook1(self, dct):
'''A function can convert dict data into object.
it's an O(1) implementation.
'''
pass
def _object_hook2(self, dct):
'''Another object hook implementation.
it's an O(N) implementation.
'''
pass
def dumps(
self,
obj,
indent: bool = None,
sort_keys: bool = None,
pretty: bool = False,
float_precision: int = None,
ensure_ascii: bool = True,
compress: bool = False,
**kwargs
):
'''Dump any object into json string.
:param pretty: if ``True``, dump json into pretty indent and sorted key
format.
:type pretty: bool
:param float_precision: default ``None``, limit floats to
N-decimal points.
:type float_precision: int
:param compress: default ``False``. If True, then compress encoded string.
:type compress: bool
'''
pass
def loads(
self,
s: str,
object_hook: bool = None,
decompress: bool = False,
ignore_comments: bool = False,
**kwargs,
):
'''load object from json encoded string.
:param decompress: default ``False``. If True, then decompress string.
:type decompress: bool
:param ignore_comments: default ``False``. If True, then ignore comments.
:type ignore_comments: bool
'''
pass
def dumps(
self,
obj,
indent: bool = None,
sort_keys: bool = None,
pretty: bool = False,
float_precision: int = None,
ensure_ascii: bool = True,
compress: bool = False,
**kwargs
):
'''Dump any object into file.
:param abspath: if ``*.json, *.js**`` then do regular dump. if ``*.gz``,
then perform compression.
:type abspath: str
:param pretty: if True, dump json into pretty indent and sorted key
format.
:type pretty: bool
:param float_precision: default ``None``, limit floats to
N-decimal points.
:type float_precision: int
:param overwrite: default ``False``, If ``True``, when you dump to
existing file, it silently overwrite it. If ``False``, an alert
message is shown. Default setting ``False`` is to prevent overwrite
file by mistake.
:type overwrite: boolean
:param verbose: default True, help-message-display trigger.
:type verbose: boolean
'''
pass
def loads(
self,
s: str,
object_hook: bool = None,
decompress: bool = False,
ignore_comments: bool = False,
**kwargs,
):
'''load object from json file.
:param abspath: if ``*.json, *.js** then do regular dump. if ``*.gz``,
then perform decompression.
:type abspath: str
:param ignore_comments: default ``False. If True, then ignore comments.
:type ignore_comments: bool
:param verbose: default True, help-message-display trigger.
:type verbose: boolean
'''
pass
def dump_bytes(self, obj, class_name=bytes_class_name):
'''
``btyes`` dumper.
'''
pass
def load_bytes(self, dct, class_name=bytes_class_name):
'''
``btyes`` loader.
'''
pass
def dump_datetime(self, obj, class_name="datetime.datetime"):
'''
``datetime.datetime`` dumper.
'''
pass
def load_datetime(self, dct, class_name="datetime.datetime"):
'''
``datetime.datetime`` loader.
'''
pass
def dump_datetime(self, obj, class_name="datetime.datetime"):
'''
``datetime.date`` dumper.
'''
pass
def load_datetime(self, dct, class_name="datetime.datetime"):
'''
``datetime.date`` loader.
'''
pass
def dump_set(self, obj, class_name=set_class_name):
'''
``set`` dumper.
'''
pass
def load_set(self, dct, class_name=set_class_name):
'''
``set`` loader.
'''
pass
def dump_deque(self, obj, class_name="collections.deque"):
'''
``collections.deque`` dumper.
'''
pass
def load_deque(self, dct, class_name="collections.deque"):
'''
``collections.deque`` loader.
'''
pass
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"):
'''
``collections.OrderedDict`` dumper.
'''
pass
def load_OrderedDict(self, dct, class_name="collections.OrderedDict"):
'''
``collections.OrderedDict`` loader.
'''
pass
| 21 | 21 | 16 | 2 | 10 | 5 | 2 | 0.55 | 1 | 12 | 0 | 1 | 20 | 0 | 20 | 34 | 358 | 61 | 194 | 76 | 136 | 107 | 114 | 38 | 92 | 7 | 3 | 3 | 45 |
148,385 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/tests/test_search_5_by_coordinate.py
|
test_search_5_by_coordinate.TestSearchEngine
|
class TestSearchEngine(SearchEngineBaseTest):
search = SearchEngine(
simple_or_comprehensive=SearchEngine.SimpleOrComprehensiveArgEnum.simple,
)
def test_resolve_sort_by(self):
with pytest.raises(ValueError):
self.search._resolve_sort_by(
"InValid Field", flag_radius_query=True)
with pytest.raises(ValueError):
self.search._resolve_sort_by(
"InValid Field", flag_radius_query=False)
assert self.search._resolve_sort_by(
Zipcode.zipcode, flag_radius_query=True
) == Zipcode.zipcode.name
assert self.search._resolve_sort_by(
Zipcode.population, flag_radius_query=True
) == Zipcode.population.name
assert self.search._resolve_sort_by(
Zipcode.zipcode, flag_radius_query=False
) == Zipcode.zipcode.name
assert self.search._resolve_sort_by(
Zipcode.population, flag_radius_query=False
) == Zipcode.population.name
assert self.search._resolve_sort_by(
None, flag_radius_query=True
) == SORT_BY_DIST
assert self.search._resolve_sort_by(
None, flag_radius_query=False
) == None
assert self.search._resolve_sort_by(
SORT_BY_DIST, flag_radius_query=True
) == SORT_BY_DIST
with pytest.raises(ValueError):
self.search._resolve_sort_by(SORT_BY_DIST, flag_radius_query=False)
def test_by_zipcode(self):
z = self.search.by_zipcode("10001")
assert z.zipcode == "10001"
assert z.major_city == "New York"
assert z.zipcode_type == ZipcodeTypeEnum.Standard.value
z = self.search.by_zipcode("123456789")
assert bool(z) is False
def test_by_prefix(self):
z_list = self.search.by_prefix("100", ascending=True)
assert_ascending_by(z_list, Zipcode.zipcode.name)
z_list = self.search.by_prefix("100", ascending=False)
assert_descending_by(z_list, Zipcode.zipcode.name)
for z in z_list:
assert z.zipcode.startswith("100")
def test_by_pattern(self):
z_list = self.search.by_pattern("001")
for z in z_list:
assert "001" in z.zipcode
def test_no_limit(self):
res = self.search.by_prefix("1000", returns=None)
assert len(res) > 0
def test_by_coordinates(self):
# Use White House in DC
lat, lng = 38.897835, -77.036541
res1 = self.search.by_coordinates(lat, lng, ascending=True)
assert len(res1) <= DEFAULT_LIMIT
dist_array = [z.dist_from(lat, lng) for z in res1]
assert_ascending(dist_array)
res2 = self.search.by_coordinates(lat, lng, ascending=False)
dist_array = [z.dist_from(lat, lng) for z in res2]
assert_descending(dist_array)
# returns everything when `returns = 0`
res3 = self.search.by_coordinates(lat, lng, ascending=True, returns=0)
assert len(res3) > len(res1)
res4 = self.search.by_coordinates(lat, lng, ascending=False, returns=0)
assert len(res4) > len(res1)
# sort by other field
res5 = self.search.by_coordinates(
lat, lng, radius=5, sort_by=Zipcode.zipcode.name)
for z in res5:
assert z.dist_from(lat, lng) <= 5
assert_ascending_by(res5, Zipcode.zipcode.name)
# when no zipcode matching criterion, return empty list
# Use Eiffel Tower in Paris
lat, lng = 48.858388, 2.294581
res6 = self.search.by_coordinates(lat, lng)
assert len(res6) == 0
|
class TestSearchEngine(SearchEngineBaseTest):
def test_resolve_sort_by(self):
pass
def test_by_zipcode(self):
pass
def test_by_prefix(self):
pass
def test_by_pattern(self):
pass
def test_no_limit(self):
pass
def test_by_coordinates(self):
pass
| 7 | 0 | 16 | 3 | 12 | 1 | 2 | 0.07 | 1 | 3 | 1 | 0 | 6 | 0 | 6 | 8 | 106 | 25 | 76 | 22 | 69 | 5 | 57 | 22 | 50 | 2 | 2 | 1 | 9 |
148,386 |
MacHu-GWU/uszipcode-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_uszipcode-project/uszipcode/search.py
|
MacHu-GWU_uszipcode-project.uszipcode.search.SearchEngine.SimpleOrComprehensiveArgEnum
|
class SimpleOrComprehensiveArgEnum(enum.Enum):
simple = enum.auto()
comprehensive = enum.auto()
|
class SimpleOrComprehensiveArgEnum(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
148,387 |
MacHu-GWU/uszipcode-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_uszipcode-project/tests/test_search_1_helpers.py
|
test_search_1_helpers.test_validate_enum_arg.Color
|
class Color(enum.Enum):
red = enum.auto()
blue = enum.auto()
|
class Color(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
148,388 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/tests/test_search_4_by_city_and_state.py
|
test_search_4_by_city_and_state.TestSearchEngine
|
class TestSearchEngine(SearchEngineBaseTest):
search = SearchEngine(
simple_or_comprehensive=SearchEngine.SimpleOrComprehensiveArgEnum.simple,
)
def test_cached_data(self):
assert self.search.city_list[0].startswith("A")
assert self.search.state_list[0].startswith("A")
assert len(self.search.city_to_state_mapper) >= 1
assert len(self.search.state_to_city_mapper) >= 1
def test_find_state(self):
for state_short in MAPPER_STATE_ABBR_SHORT_TO_LONG:
assert self.search.find_state(
state_short.lower(), best_match=True
)[0] == state_short
for state_long in MAPPER_STATE_ABBR_LONG_TO_SHORT:
assert self.search.find_state(
state_long.lower()[:8], best_match=True
)[0] == MAPPER_STATE_ABBR_LONG_TO_SHORT[state_long]
assert self.search.find_state("mary", best_match=True) == ["MD", ]
result = set(self.search.find_state("virgin", best_match=False))
assert result == {"VI", "WV", "VA"}
assert self.search.find_state("neyork", best_match=False) == ["NY", ]
with pytest.raises(ValueError):
self.search.find_state("THIS IS NOT A STATE!", best_match=True)
with pytest.raises(ValueError):
self.search.find_state("THIS IS NOT A STATE!", best_match=False)
def test_find_city(self):
city_result = self.search.find_city("phonix", best_match=True)
city_expected = ["Phoenix", ]
assert city_result == city_expected
city_result = self.search.find_city("kerson", best_match=False)
city_result.sort()
city_expected = [
"Dickerson", "Dickerson Run",
"Emerson", "Ericson", "Everson", "Keldron", "Nickerson",
]
for city in city_result:
assert city in city_expected
with pytest.raises(ValueError):
self.search.find_city(PASSWORD, best_match=False)
city_result = self.search.find_city(
"kersen", state="kensas", best_match=False
)
city_expected = ["Nickerson", ]
assert city_result == city_expected
def test_by_city(self):
res = self.search.by_city("vienna")
s = set()
for z in res:
assert z.major_city == "Vienna"
s.add(z.state_abbr)
res = self.search.by_city(PASSWORD)
assert len(res) == 0
def test_by_state(self):
res = self.search.by_state("ca")
assert len(res) > 0
for z in res:
assert z.state_abbr == "CA"
res = self.search.by_state("ilinoy")
assert len(res) > 0
for z in res:
assert z.state_abbr == "IL"
def test_by_city_and_state(self):
# Arlington, VA
res = self.search.by_city_and_state(city="arlingten", state="virgnea")
assert len(res) > 0
for z in res:
assert z.major_city == "Arlington"
assert z.state_abbr == "VA"
def test_edge_case(self):
zipcode = self.search.by_zipcode(0)
assert bool(zipcode) is False
# Use White House in DC
lat, lng = 38.897835, -77.036541
res = self.search.by_coordinates(lat, lng, radius=0.01)
assert len(res) == 0
# Use Eiffel Tower in Paris
lat, lng = 48.858388, 2.294581
res = self.search.by_coordinates(lat, lng, radius=0.01)
assert len(res) == 0
res = self.search.by_city_and_state("Unknown", "MD")
assert len(res) == 0
res = self.search.by_prefix("00000")
assert len(res) == 0
res = self.search.by_pattern("00000")
assert len(res) == 0
res = self.search.by_population(upper=-1)
assert len(res) == 0
def test_bad_param(self):
with pytest.raises(ValueError):
self.search.query(zipcode="10001", prefix="10001", pattern="10001")
with pytest.raises(ValueError):
self.search.query(lat=34, lng=-72)
|
class TestSearchEngine(SearchEngineBaseTest):
def test_cached_data(self):
pass
def test_find_state(self):
pass
def test_find_city(self):
pass
def test_by_city(self):
pass
def test_by_state(self):
pass
def test_by_city_and_state(self):
pass
def test_edge_case(self):
pass
def test_bad_param(self):
pass
| 9 | 0 | 13 | 2 | 11 | 0 | 2 | 0.03 | 1 | 3 | 0 | 0 | 8 | 0 | 8 | 10 | 119 | 26 | 90 | 26 | 81 | 3 | 79 | 26 | 70 | 3 | 2 | 1 | 15 |
148,389 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/tests/test_search_3_by_value_range.py
|
test_search_3_by_value_range.TestSearchEngine
|
class TestSearchEngine(SearchEngineBaseTest):
search = SearchEngine(
simple_or_comprehensive=SearchEngine.SimpleOrComprehensiveArgEnum.simple,
)
def test_by_range(self):
test_cases = [
# tuple: (sort by attr name, lower, upper)
(Zipcode.population.name, 10000, 50000),
(Zipcode.population_density.name, 1000, 2000),
(Zipcode.land_area_in_sqmi.name, 5, 10),
(Zipcode.water_area_in_sqmi.name, 0.5, 1),
(Zipcode.housing_units.name, 1000, 2000),
(Zipcode.occupied_housing_units.name, 1000, 2000),
(Zipcode.median_home_value.name, 200000, 400000),
(Zipcode.median_household_income.name, 50000, 60000),
]
for sort_by, lower, upper in test_cases:
method_name = f"by_{sort_by}"
method = getattr(self.sr, method_name)
z_list = method(lower=lower, upper=upper)
assert len(z_list) > 0
assert_descending_by(z_list, sort_by)
|
class TestSearchEngine(SearchEngineBaseTest):
def test_by_range(self):
pass
| 2 | 0 | 18 | 0 | 17 | 1 | 2 | 0.05 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 3 | 23 | 1 | 21 | 8 | 19 | 1 | 10 | 8 | 8 | 2 | 2 | 1 | 2 |
148,390 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/tests/test_search_2_by_zipcode.py
|
test_search_2_by_zipcode.TestSearchEngineQuery
|
class TestSearchEngineQuery(SearchEngineBaseTest):
search = SearchEngine(
simple_or_comprehensive=SearchEngine.SimpleOrComprehensiveArgEnum.simple
)
def test_by_zipcode(self):
z = self.sr.by_zipcode("94103")
assert z.city == "San Francisco"
assert z.state == "CA"
|
class TestSearchEngineQuery(SearchEngineBaseTest):
def test_by_zipcode(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 3 | 9 | 1 | 8 | 4 | 6 | 0 | 6 | 4 | 4 | 1 | 2 | 0 | 1 |
148,391 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/tests/test_model.py
|
test_model.TestZipcode
|
class TestZipcode(object):
def test_city(self):
z = Zipcode(major_city="New York")
assert z.major_city == z.city
def test_bool(self):
assert bool(Zipcode()) is False
assert bool(Zipcode(zipcode="10001")) is True
assert bool(Zipcode(zipcode="")) is True
def test_comparison(self):
assert Zipcode(zipcode="10001") < Zipcode(zipcode="10002")
assert Zipcode(zipcode="10002") <= Zipcode(zipcode="10002")
assert Zipcode(zipcode="10002") > Zipcode(zipcode="10001")
assert Zipcode(zipcode="10002") >= Zipcode(zipcode="10002")
assert Zipcode(zipcode="10001") == Zipcode(zipcode="10001")
assert Zipcode(zipcode="10001") != Zipcode(zipcode="10002")
with raises(ValueError):
_ = Zipcode(zipcode="10001") < Zipcode()
with raises(ValueError):
_ = Zipcode() < Zipcode(zipcode="10001")
def test_hash(self):
z_set_1 = {
Zipcode(zipcode="10001"),
Zipcode(zipcode="10002"),
}
z_set_2 = {
Zipcode(zipcode="10002"),
Zipcode(zipcode="10003"),
}
assert len(z_set_1.union(z_set_2)) == 3
assert len(z_set_1.intersection(z_set_2)) == 1
assert z_set_1.difference(z_set_2).pop().zipcode == "10001"
assert hash(Zipcode()) != hash(Zipcode(zipcode="10001"))
def test_state_attr(self):
z = Zipcode(state="ca")
assert z.state_abbr == "CA"
assert z.state_long == MAPPER_STATE_ABBR_SHORT_TO_LONG["CA"]
def test_glance(self):
z = Zipcode(zipcode="10001")
z.glance()
|
class TestZipcode(object):
def test_city(self):
pass
def test_bool(self):
pass
def test_comparison(self):
pass
def test_hash(self):
pass
def test_state_attr(self):
pass
def test_glance(self):
pass
| 7 | 0 | 7 | 1 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 6 | 0 | 6 | 6 | 47 | 8 | 39 | 13 | 32 | 0 | 33 | 13 | 26 | 1 | 1 | 1 | 6 |
148,392 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/tests/test_search_6_census_data.py
|
test_search_6_census_data.TestSearchEngineCensusData
|
class TestSearchEngineCensusData(SearchEngineBaseTest):
search = SearchEngine(
simple_or_comprehensive=SearchEngine.SimpleOrComprehensiveArgEnum.comprehensive
)
def test(self):
z = self.search.by_zipcode("10001")
_ = z.bounds
if self.search.zip_klass is ComprehensiveZipcode:
_ = z.population_by_age
_ = z.head_of_household_by_age
_ = z.polygon
def test_by_zipcode_non_standard(self):
"""
Test by_zipcode should return any type zipcode.
"""
z = self.search.by_zipcode(48874)
assert z.zipcode_type != ZipcodeTypeEnum.Standard.value
assert z.lat is not None
|
class TestSearchEngineCensusData(SearchEngineBaseTest):
def test(self):
pass
def test_by_zipcode_non_standard(self):
'''
Test by_zipcode should return any type zipcode.
'''
pass
| 3 | 1 | 7 | 0 | 6 | 2 | 2 | 0.2 | 1 | 2 | 2 | 0 | 2 | 0 | 2 | 4 | 19 | 1 | 15 | 7 | 12 | 3 | 13 | 7 | 10 | 2 | 2 | 1 | 3 |
148,393 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/uszipcode/tests/__init__.py
|
MacHu-GWU_uszipcode-project.uszipcode.tests.SearchEngineBaseTest
|
class SearchEngineBaseTest(object):
search: SearchEngine = None
@classmethod
def teardown_class(cls):
cls.search.close()
@property
def sr(self) -> SearchEngine:
return self.search
|
class SearchEngineBaseTest(object):
@classmethod
def teardown_class(cls):
pass
@property
def sr(self) -> SearchEngine:
pass
| 5 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 1 | 5 | 1 | 0 | 2 | 2 | 10 | 2 | 8 | 6 | 3 | 0 | 6 | 4 | 3 | 1 | 1 | 0 | 2 |
148,394 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/uszipcode/model.py
|
MacHu-GWU_uszipcode-project.uszipcode.model.ZipcodeTypeEnum
|
class ZipcodeTypeEnum(enum.Enum):
"""
zipcode type visitor class.
"""
Standard = "STANDARD"
PO_Box = "PO BOX"
Unique = "UNIQUE"
Military = "MILITARY"
|
class ZipcodeTypeEnum(enum.Enum):
'''
zipcode type visitor class.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 8 | 0 | 5 | 5 | 4 | 3 | 5 | 5 | 4 | 0 | 4 | 0 | 0 |
148,395 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/uszipcode/model.py
|
MacHu-GWU_uszipcode-project.uszipcode.model.SimpleZipcode
|
class SimpleZipcode(AbstractSimpleZipcode):
__tablename__ = "simple_zipcode"
|
class SimpleZipcode(AbstractSimpleZipcode):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
148,396 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/uszipcode/model.py
|
MacHu-GWU_uszipcode-project.uszipcode.model.ComprehensiveZipcode
|
class ComprehensiveZipcode(AbstractComprehensiveZipcode):
__tablename__ = "comprehensive_zipcode"
|
class ComprehensiveZipcode(AbstractComprehensiveZipcode):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,397 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/uszipcode/model.py
|
MacHu-GWU_uszipcode-project.uszipcode.model.AbstractSimpleZipcode
|
class AbstractSimpleZipcode(Base, sam.ExtendedBase):
"""
Base class for Zipcode.
"""
__abstract__ = True
zipcode = sa.Column(sa.String, primary_key=True)
zipcode_type = sa.Column(sa.String)
major_city = sa.Column(sa.String)
post_office_city = sa.Column(sa.String)
common_city_list = sa.Column(sam.types.CompressedJSONType)
county = sa.Column(sa.String)
state = sa.Column(sa.String)
lat = sa.Column(sa.Float, index=True)
lng = sa.Column(sa.Float, index=True)
timezone = sa.Column(sa.String)
radius_in_miles = sa.Column(sa.Float)
area_code_list = sa.Column(sam.types.CompressedJSONType)
population = sa.Column(sa.Integer)
population_density = sa.Column(sa.Float)
land_area_in_sqmi = sa.Column(sa.Float)
water_area_in_sqmi = sa.Column(sa.Float)
housing_units = sa.Column(sa.Integer)
occupied_housing_units = sa.Column(sa.Integer)
median_home_value = sa.Column(sa.Integer)
median_household_income = sa.Column(sa.Integer)
bounds_west = sa.Column(sa.Float)
bounds_east = sa.Column(sa.Float)
bounds_north = sa.Column(sa.Float)
bounds_south = sa.Column(sa.Float)
_settings_major_attrs = "zipcode,zipcode_type,city,county,state,lat,lng,timezone".split(
",")
@property
def city(self):
"""
Alias of ``.major_city``.
"""
return self.major_city
@property
def bounds(self) -> dict:
"""
Border boundary.
"""
return {
"west": self.bounds_west,
"east": self.bounds_east,
"north": self.bounds_north,
"south": self.bounds_south,
}
@property
def state_abbr(self) -> str:
"""
Return state abbreviation, two letters, all uppercase.
"""
return self.state.upper()
@property
def state_long(self) -> str:
"""
Return state full name.
"""
return MAPPER_STATE_ABBR_SHORT_TO_LONG.get(self.state.upper())
def __bool__(self):
"""
For Python3 bool() method.
"""
return self.zipcode is not None
def __lt__(self, other: 'AbstractSimpleZipcode'):
"""
For ``>`` comparison operator.
"""
if (self.zipcode is None) or (other.zipcode is None):
raise ValueError(
"Empty Zipcode instance doesn't support comparison.")
else:
return self.zipcode < other.zipcode
def __eq__(self, other: 'AbstractSimpleZipcode'):
"""
For ``==`` comparison operator.
"""
return self.zipcode == other.zipcode
def __hash__(self):
"""
For hash() method
"""
return hash(self.zipcode)
def dist_from(self, lat: float, lng: float, unit: Unit = Unit.MILES):
"""
Calculate the distance of the center of this zipcode from a coordinator.
:param lat: latitude.
:param lng: longitude.
"""
return haversine((self.lat, self.lng), (lat, lng), unit=unit)
def to_json(self, include_null: bool = True):
"""
Convert to json.
"""
data = self.to_OrderedDict(include_null=include_null)
return json.dumps(data, indent=4)
|
class AbstractSimpleZipcode(Base, sam.ExtendedBase):
'''
Base class for Zipcode.
'''
@property
def city(self):
'''
Alias of ``.major_city``.
'''
pass
@property
def bounds(self) -> dict:
'''
Border boundary.
'''
pass
@property
def state_abbr(self) -> str:
'''
Return state abbreviation, two letters, all uppercase.
'''
pass
@property
def state_long(self) -> str:
'''
Return state full name.
'''
pass
def __bool__(self):
'''
For Python3 bool() method.
'''
pass
def __lt__(self, other: 'AbstractSimpleZipcode'):
'''
For ``>`` comparison operator.
'''
pass
def __eq__(self, other: 'AbstractSimpleZipcode'):
'''
For ``==`` comparison operator.
'''
pass
def __hash__(self):
'''
For hash() method
'''
pass
def dist_from(self, lat: float, lng: float, unit: Unit = Unit.MILES):
'''
Calculate the distance of the center of this zipcode from a coordinator.
:param lat: latitude.
:param lng: longitude.
'''
pass
def to_json(self, include_null: bool = True):
'''
Convert to json.
'''
pass
| 15 | 11 | 6 | 0 | 3 | 3 | 1 | 0.56 | 2 | 5 | 0 | 2 | 10 | 0 | 10 | 10 | 117 | 20 | 62 | 42 | 47 | 35 | 50 | 38 | 39 | 2 | 1 | 1 | 11 |
148,398 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/uszipcode/model.py
|
MacHu-GWU_uszipcode-project.uszipcode.model.AbstractComprehensiveZipcode
|
class AbstractComprehensiveZipcode(AbstractSimpleZipcode):
__abstract__ = True
polygon = sa.Column(sam.types.CompressedJSONType)
# Stats and Demographics
population_by_year = sa.Column(sam.types.CompressedJSONType)
population_by_age = sa.Column(sam.types.CompressedJSONType)
population_by_gender = sa.Column(sam.types.CompressedJSONType)
population_by_race = sa.Column(sam.types.CompressedJSONType)
head_of_household_by_age = sa.Column(sam.types.CompressedJSONType)
families_vs_singles = sa.Column(sam.types.CompressedJSONType)
households_with_kids = sa.Column(sam.types.CompressedJSONType)
children_by_age = sa.Column(sam.types.CompressedJSONType)
# Real Estate and Housing
housing_type = sa.Column(sam.types.CompressedJSONType)
year_housing_was_built = sa.Column(sam.types.CompressedJSONType)
housing_occupancy = sa.Column(sam.types.CompressedJSONType)
vacancy_reason = sa.Column(sam.types.CompressedJSONType)
owner_occupied_home_values = sa.Column(sam.types.CompressedJSONType)
rental_properties_by_number_of_rooms = sa.Column(sam.types.CompressedJSONType)
monthly_rent_including_utilities_studio_apt = sa.Column(sam.types.CompressedJSONType)
monthly_rent_including_utilities_1_b = sa.Column(sam.types.CompressedJSONType)
monthly_rent_including_utilities_2_b = sa.Column(sam.types.CompressedJSONType)
monthly_rent_including_utilities_3plus_b = sa.Column(sam.types.CompressedJSONType)
# Employment, Income, Earnings, and Work
employment_status = sa.Column(sam.types.CompressedJSONType)
average_household_income_over_time = sa.Column(sam.types.CompressedJSONType)
household_income = sa.Column(sam.types.CompressedJSONType)
annual_individual_earnings = sa.Column(sam.types.CompressedJSONType)
sources_of_household_income____percent_of_households_receiving_income = sa.Column(
sam.types.CompressedJSONType)
sources_of_household_income____average_income_per_household_by_income_source = sa.Column(
sam.types.CompressedJSONType)
household_investment_income____percent_of_households_receiving_investment_income = sa.Column(
sam.types.CompressedJSONType)
household_investment_income____average_income_per_household_by_income_source = sa.Column(
sam.types.CompressedJSONType)
household_retirement_income____percent_of_households_receiving_retirement_incom = sa.Column(
sam.types.CompressedJSONType)
household_retirement_income____average_income_per_household_by_income_source = sa.Column(
sam.types.CompressedJSONType)
source_of_earnings = sa.Column(sam.types.CompressedJSONType)
means_of_transportation_to_work_for_workers_16_and_over = sa.Column(
sam.types.CompressedJSONType)
travel_time_to_work_in_minutes = sa.Column(sam.types.CompressedJSONType)
# Schools and Education
educational_attainment_for_population_25_and_over = sa.Column(
sam.types.CompressedJSONType)
school_enrollment_age_3_to_17 = sa.Column(sam.types.CompressedJSONType)
|
class AbstractComprehensiveZipcode(AbstractSimpleZipcode):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 10 | 58 | 10 | 44 | 36 | 43 | 4 | 36 | 36 | 35 | 0 | 2 | 0 | 0 |
148,399 |
MacHu-GWU/uszipcode-project
|
MacHu-GWU_uszipcode-project/uszipcode/search.py
|
MacHu-GWU_uszipcode-project.uszipcode.search.SearchEngine
|
class SearchEngine(object):
"""
Zipcode Search Engine.
:type simple_or_comprehensive: SearchEngine.SimpleOrComprehensiveArgEnum
:param simple_or_comprehensive: default SearchEngine.SimpleOrComprehensiveArgEnum,
use the simple zipcode db. Rich Demographics, Real Estate, Employment,
Education info are not available. if
SearchEngine.SimpleOrComprehensiveArgEnum.comprehensive,
use the rich info database.
:type db_file_path: str
:param db_file_path: where you want to download the sqlite database to. This
property allows you to customize where you want to store the data file
locally. by default it is ${HOME}/.uszipcode/...
:type download_url: str
:param download_url: where you want to download the sqlite database file from.
This property allows you to upload the .sqlite file to your private file
host and download from it. In case the default download url fail.
:type engine: Engine
:param engine: a sqlachemy engine object. It allows you to use any
backend database instead of the default sqlite database.
Usage::
>>> search = SearchEngine()
>>> zipcode = search.by_zipcode("10001")
Context Manager::
>>> with SearchEngine() as search:
... for zipcode in search.by_coordinates(lat, lng, radius):
... # do what every you want
:meth:`SearchEngine.query` provides mass options to customize your query.
:attr:`SearchEngine.ses` is a ``sqlalchemy.orm.Session`` object, you can
use it for query. For example::
>>> from uszipcode import SearchEngine, SimpleZipcode, ComprehensiveZipcode
>>> search = SearchEngine()
>>> search.ses.scalar(SimpleZipcode).filter(SimpleZipcode.zipcode=="10001")
.. note::
:class:`SearchEngine` is not multi-thread safe. You should create different
instance for each thread.
"""
class SimpleOrComprehensiveArgEnum(enum.Enum):
simple = enum.auto()
comprehensive = enum.auto()
_default_db_file_path_mapper = {
SimpleOrComprehensiveArgEnum.simple: DEFAULT_SIMPLE_DB_FILE_PATH,
SimpleOrComprehensiveArgEnum.comprehensive: DEFAULT_COMPREHENSIVE_DB_FILE_PATH,
}
_default_download_url_mapper = {
SimpleOrComprehensiveArgEnum.simple: SIMPLE_DB_FILE_DOWNLOAD_URL,
SimpleOrComprehensiveArgEnum.comprehensive: COMPREHENSIVE_DB_FILE_DOWNLOAD_URL,
}
def __init__(
self,
simple_or_comprehensive: SimpleOrComprehensiveArgEnum = SimpleOrComprehensiveArgEnum.simple,
db_file_path: typing.Union[str, None] = None,
download_url: typing.Union[str, None] = None,
engine: Engine = None,
):
validate_enum_arg(
self.SimpleOrComprehensiveArgEnum,
"simple_or_comprehensive",
simple_or_comprehensive,
)
self.simple_or_comprehensive = simple_or_comprehensive
if isinstance(engine, Engine):
self.db_file_path = None
self.download_url = None
self.engine = engine
else:
self.db_file_path = db_file_path
self.download_url = download_url
self._download_db_file_if_not_exists()
self.engine = sam.EngineCreator().create_sqlite(path=self.db_file_path)
self.eng = self.engine
self.session = orm.Session(self.engine)
self.ses = self.session
self.zip_klass: typing.Union[SimpleZipcode, ComprehensiveZipcode]
if self.simple_or_comprehensive is self.SimpleOrComprehensiveArgEnum.simple:
self.zip_klass = SimpleZipcode
elif self.simple_or_comprehensive is self.SimpleOrComprehensiveArgEnum.comprehensive:
self.zip_klass = ComprehensiveZipcode
def _download_db_file_if_not_exists(self):
if self.db_file_path is None:
self.db_file_path = self._default_db_file_path_mapper[self.simple_or_comprehensive]
if self.download_url is None:
self.download_url = self._default_download_url_mapper[self.simple_or_comprehensive]
p = Path(self.db_file_path)
if not p.exists():
if self.simple_or_comprehensive is self.SimpleOrComprehensiveArgEnum.simple:
download_db_file(
db_file_path=self.db_file_path,
download_url=self.download_url,
chunk_size=1024 * 1024,
progress_size=1024 * 1024,
)
elif self.simple_or_comprehensive is self.SimpleOrComprehensiveArgEnum.comprehensive:
download_db_file(
db_file_path=self.db_file_path,
download_url=self.download_url,
chunk_size=1024 * 1024,
progress_size=50 * 1024 * 1024,
)
def __enter__(self): # pragma: no cover
return self
def __exit__(self, *exc_info): # pragma: no cover
self.close()
def __del__(self): # pragma: no cover
# Cleanup connection if still open
if self.ses:
self.close()
def close(self):
"""
close database connection.
"""
self.ses.close()
# Since fuzzy search on City and State requires full list of city and state
# We load the full list from the database only once and store it in cache
_city_list: typing.List[str] = None
"""
all available city list
"""
_state_list: typing.List[str] = None
"""
all available state list, in long format
"""
_state_to_city_mapper: typing.Dict[str, list] = None
"""
"""
_city_to_state_mapper: typing.Dict[str, list] = None
def _get_cache_data(self):
_city_set = set()
_state_to_city_mapper: typing.Dict[str, set] = dict()
_city_to_state_mapper: typing.Dict[str, set] = dict()
stmt = sa.select(self.zip_klass.major_city, self.zip_klass.state)
for major_city, state in self.ses.execute(stmt):
if major_city is not None:
_city_set.add(major_city)
if state is not None:
state = state.upper()
try:
_state_to_city_mapper[state].add(major_city)
except:
_state_to_city_mapper[state] = {major_city, }
try:
_city_to_state_mapper[major_city].add(state)
except:
_city_to_state_mapper[major_city] = {state, }
self._city_list = list(_city_set)
self._city_list.sort()
self._state_list = list(MAPPER_STATE_ABBR_LONG_TO_SHORT)
self._state_list.sort()
self._state_to_city_mapper = OrderedDict(
sorted(
(
(state, list(city_set))
for state, city_set in _state_to_city_mapper.items()
),
key=lambda x: x[0]
)
)
for city_list in self._state_to_city_mapper.values():
city_list.sort()
self._city_to_state_mapper = OrderedDict(
sorted(
(
(city, list(state_set))
for city, state_set in _city_to_state_mapper.items()
),
key=lambda x: x[0]
)
)
for state_list in self._city_to_state_mapper.values():
state_list.sort()
@property
def city_list(self): # pragma: no cover
"""
Return all available city name.
"""
if self._city_list is None:
self._get_cache_data()
return self._city_list
@property
def state_list(self): # pragma: no cover
"""
Return all available state name.
"""
if self._state_list is None:
self._get_cache_data()
return self._state_list
@property
def state_to_city_mapper(self): # pragma: no cover
if self._state_to_city_mapper is None:
self._get_cache_data()
return self._state_to_city_mapper
@property
def city_to_state_mapper(self): # pragma: no cover
if self._city_to_state_mapper is None:
self._get_cache_data()
return self._city_to_state_mapper
def find_state(
self,
state: str,
best_match: bool = True,
min_similarity: int = 70,
) -> typing.List[str]:
"""
Fuzzy search correct state.
:param best_match: bool, when True, only the best matched state
will be return. otherwise, will return all matching states.
"""
result_state_short_list = list()
# check if it is a abbreviate name
if state.upper() in MAPPER_STATE_ABBR_SHORT_TO_LONG:
result_state_short_list.append(state.upper())
# if not, find out what is the state that user looking for
else:
if best_match:
state_long, confidence = extractOne(state, self.state_list)
if confidence >= min_similarity:
result_state_short_list.append(
MAPPER_STATE_ABBR_LONG_TO_SHORT[state_long])
else:
for state_long, confidence in extract(state, self.state_list):
if confidence >= min_similarity:
result_state_short_list.append(
MAPPER_STATE_ABBR_LONG_TO_SHORT[state_long])
if len(result_state_short_list) == 0:
message = ("'%s' is not a valid state name, use 2 letter "
"short name or correct full name please.")
raise ValueError(message % state)
return result_state_short_list
def find_city(
self,
city: str,
state: str = None,
best_match: bool = True,
min_similarity: int = 70,
) -> typing.List[str]:
"""
Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will return. otherwise, will return all matching cities.
**中文文档**
如果给定了state, 则只在指定的state里的城市中寻找, 否则, 在全国所有的城市中寻找。
"""
# find out what is the city that user looking for
if state:
state_short = self.find_state(state, best_match=True)[0]
city_pool = self.state_to_city_mapper[state_short.upper()]
else:
city_pool = self.city_list
result_city_list = list()
if best_match:
city, confidence = extractOne(city, city_pool)
if confidence >= min_similarity:
result_city_list.append(city)
else:
for city, confidence in extract(city, city_pool):
if confidence >= min_similarity:
result_city_list.append(city)
if len(result_city_list) == 0:
raise ValueError("'%s' is not a valid city name" % city)
return result_city_list
@staticmethod
def _resolve_sort_by(sort_by: str, flag_radius_query: bool):
"""
Result ``sort_by`` argument.
:param sort_by: str, or sqlalchemy ORM attribute.
:param flag_radius_query:
:return:
"""
if sort_by is None:
if flag_radius_query:
sort_by = SORT_BY_DIST
elif isinstance(sort_by, str):
if sort_by.lower() == SORT_BY_DIST:
if flag_radius_query is False:
msg = "`sort_by` arg can be 'dist' only under distance based query!"
raise ValueError(msg)
sort_by = SORT_BY_DIST
elif sort_by not in SimpleZipcode.__table__.columns:
msg = "`sort_by` arg has to be one of the Zipcode attribute or 'dist'!"
raise ValueError(msg)
else:
sort_by = sort_by.name
return sort_by
def query(
self,
zipcode: typing.Union[int, float] = None,
prefix: str = None,
pattern: str = None,
city: str = None,
state: str = None,
lat: typing.Union[int, float] = None,
lng: typing.Union[int, float] = None,
radius=None,
population_lower: int = None,
population_upper: int = None,
population_density_lower: int = None,
population_density_upper: int = None,
land_area_in_sqmi_lower: int = None,
land_area_in_sqmi_upper: int = None,
water_area_in_sqmi_lower: int = None,
water_area_in_sqmi_upper: int = None,
housing_units_lower: int = None,
housing_units_upper: int = None,
occupied_housing_units_lower: int = None,
occupied_housing_units_upper: int = None,
median_home_value_lower: int = None,
median_home_value_upper: int = None,
median_household_income_lower: int = None,
median_household_income_upper: int = None,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
"""
Query zipcode the simple way.
:param zipcode: int or str, find the exactly matched zipcode. Will be
automatically zero padding to 5 digits
:param prefix: str, zipcode prefix.
:param pattern: str, zipcode wildcard.
:param city: str, city name.
:param state: str, state name, two letter abbr or state full name.
:param lat: latitude.
:param lng: longitude.
:param radius: number, only returns zipcodes within a specific circle.
:param population_lower:
:param population_upper:
:param population_density_lower:
:param population_density_upper:
:param land_area_in_sqmi_lower:
:param land_area_in_sqmi_upper:
:param water_area_in_sqmi_lower:
:param water_area_in_sqmi_upper:
:param housing_units_lower:
:param housing_units_upper:
:param occupied_housing_units_lower:
:param occupied_housing_units_upper:
:param median_home_value_lower:
:param median_home_value_upper:
:param median_household_income_lower:
:param median_household_income_upper:
:param zipcode_type: str or :class`~uszipcode.model.ZipcodeType` attribute.
if None, allows to return any type of zipcode.
if specified, only return specified zipcode type.
:param sort_by: str or :class:`~uszipcode.model.Zipcode` attribute,
specified which field is used for sorting.
:param ascending: bool, True means ascending, False means descending.
:param returns: int or None, limit the number of result to returns.
:return: list of :class:`~uszipcode.model.SimpleZipcode` or
:class:`~uszipcode.model.ComprehensiveZipcode`.
"""
filters = list()
# by coordinates
_n_radius_param_not_null = sum([
isinstance(lat, (int, float)),
isinstance(lng, (int, float)),
isinstance(radius, (int, float)),
])
if _n_radius_param_not_null == 3:
flag_radius_query = True
if radius <= 0: # pragma: no cover
raise ValueError("`radius` parameters can't less than 0!")
elif radius <= 50: # pragma: no cover
radius_coef = 1.05
elif radius <= 100: # pragma: no cover
radius_coef = 1.10
elif radius <= 250: # pragma: no cover
radius_coef = 1.25
elif radius <= 500: # pragma: no cover
radius_coef = 1.5
else: # pragma: no cover
radius_coef = 2.0
if radius >= 250: # pragma: no cover
msg = ("\nwarning! search within radius >= 250 miles "
"may greatly slow down the query!")
sys.stdout.write(msg)
# define lat lng boundary, should be slightly larger than the circle
dist_btwn_lat_deg = 69.172
dist_btwn_lon_deg = math.cos(lat) * 69.172
lat_degr_rad = abs(radius * radius_coef / dist_btwn_lat_deg)
lon_degr_rad = abs(radius * radius_coef / dist_btwn_lon_deg)
lat_lower = lat - lat_degr_rad
lat_upper = lat + lat_degr_rad
lng_lower = lng - lon_degr_rad
lng_upper = lng + lon_degr_rad
filters.append(self.zip_klass.lat >= lat_lower)
filters.append(self.zip_klass.lat <= lat_upper)
filters.append(self.zip_klass.lng >= lng_lower)
filters.append(self.zip_klass.lng <= lng_upper)
elif _n_radius_param_not_null == 0:
flag_radius_query = False
else:
msg = "You can either specify all of `lat`, `lng`, `radius` or none of them"
raise ValueError(msg)
# by city or state
if (state is not None) and (city is not None):
state = self.find_state(state, best_match=True)[0]
city = self.find_city(city, state, best_match=True)[0]
filters.append(self.zip_klass.state == state)
filters.append(self.zip_klass.major_city == city)
try:
state = self.find_state(state, best_match=True)[0]
city = self.find_city(city, state, best_match=True)[0]
filters.append(self.zip_klass.state == state)
filters.append(self.zip_klass.major_city == city)
except ValueError: # pragma: no cover
return []
elif (state is not None):
try:
state = self.find_state(state, best_match=True)[0]
filters.append(self.zip_klass.state == state)
except ValueError: # pragma: no cover
return []
elif (city is not None):
try:
city = self.find_city(city, None, best_match=True)[0]
filters.append(self.zip_klass.major_city == city)
except ValueError: # pragma: no cover
return []
else:
pass
# by common filter
if sum([zipcode is None, prefix is None, pattern is None]) <= 1:
msg = "You can only specify one of the `zipcode`, `prefix` and `pattern`!"
raise ValueError(msg)
if zipcode_type is not None:
filters.append(self.zip_klass.zipcode_type == zipcode_type.value)
if zipcode is not None:
filters.append(self.zip_klass.zipcode == str(zipcode))
if prefix is not None:
filters.append(self.zip_klass.zipcode.startswith(str(prefix)))
if pattern is not None:
filters.append(
self.zip_klass.zipcode.like("%%%s%%" % str(pattern))
)
if population_lower is not None:
filters.append(self.zip_klass.population >= population_lower)
if population_upper is not None:
filters.append(self.zip_klass.population <= population_upper)
if population_density_lower is not None:
filters.append(
self.zip_klass.population_density >= population_density_lower
)
if population_density_upper is not None:
filters.append(
self.zip_klass.population_density <= population_density_upper
)
if land_area_in_sqmi_lower is not None:
filters.append(
self.zip_klass.land_area_in_sqmi >= land_area_in_sqmi_lower
)
if land_area_in_sqmi_upper is not None:
filters.append(
self.zip_klass.land_area_in_sqmi <= land_area_in_sqmi_upper
)
if water_area_in_sqmi_lower is not None:
filters.append(
self.zip_klass.water_area_in_sqmi >= water_area_in_sqmi_lower
)
if water_area_in_sqmi_upper is not None:
filters.append(
self.zip_klass.water_area_in_sqmi <= water_area_in_sqmi_upper
)
if housing_units_lower is not None:
filters.append(self.zip_klass.housing_units >= housing_units_lower)
if housing_units_upper is not None:
filters.append(self.zip_klass.housing_units <= housing_units_upper)
if occupied_housing_units_lower is not None:
filters.append(
self.zip_klass.occupied_housing_units >= occupied_housing_units_lower
)
if occupied_housing_units_upper is not None:
filters.append(
self.zip_klass.occupied_housing_units <= occupied_housing_units_upper
)
if median_home_value_lower is not None:
filters.append(
self.zip_klass.median_home_value >= median_home_value_lower
)
if median_home_value_upper is not None:
filters.append(
self.zip_klass.median_home_value <= median_home_value_upper
)
if median_household_income_lower is not None:
filters.append(
self.zip_klass.median_household_income >= median_household_income_lower
)
if median_household_income_upper is not None:
filters.append(
self.zip_klass.median_household_income <= median_household_income_upper
)
# --- solve coordinates and other search sort_by conflict ---
sort_by = self._resolve_sort_by(sort_by, flag_radius_query)
stmt = sa.select(self.zip_klass).where(*filters)
if sort_by is None:
pass
elif sort_by == SORT_BY_DIST:
pass
else:
field = getattr(self.zip_klass, sort_by)
if ascending:
by = field.asc()
else:
by = field.desc()
stmt = stmt.order_by(by)
if flag_radius_query:
# if we query by radius, then ignore returns limit before the
# distance calculation, and then manually limit the returns
pairs = list()
for z in self.ses.scalars(stmt):
dist = z.dist_from(lat, lng)
if dist <= radius:
pairs.append((dist, z))
if sort_by == SORT_BY_DIST:
if ascending:
if returns:
pairs_new = heapq.nsmallest(
returns, pairs, key=lambda x: x[0])
else:
pairs_new = list(sorted(pairs, key=lambda x: x[0]))
else:
if returns:
pairs_new = heapq.nlargest(
returns, pairs, key=lambda x: x[0]
)
else:
pairs_new = list(
sorted(pairs, key=lambda x: x[0], reverse=True)
)
return [z for _, z in pairs_new]
else:
return [z for _, z in pairs[:returns]]
else:
if returns:
stmt = stmt.limit(returns)
return self.ses.scalars(stmt).all()
def by_zipcode(
self,
zipcode: typing.Union[int, str],
zero_padding: bool = True,
) -> typing.Union[SimpleZipcode, ComprehensiveZipcode, None]:
"""
Search zipcode by exact 5 digits zipcode. No zero padding is needed.
:param zipcode: int or str, the zipcode will be automatically
zero padding to 5 digits.
:param zipcode_type: str or :class`~uszipcode.model.ZipcodeType` attribute.
by default, it returns any zipcode type.
:param zero_padding: bool, toggle on and off automatic zero padding.
"""
if zero_padding:
zipcode = str(zipcode).zfill(5)
else: # pragma: no cover
zipcode = str(zipcode)
return self.ses.get(self.zip_klass, zipcode)
def by_prefix(
self,
prefix: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by first N digits.
Returns multiple results.
"""
return self.query(
prefix=prefix,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_pattern(
self,
pattern: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode by wildcard.
Returns multiple results.
"""
return self.query(
pattern=pattern,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_city(
self,
city: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by fuzzy City name.
My engine use fuzzy match and guess what is the city you want.
"""
return self.query(
city=city,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_state(
self,
state: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by fuzzy State name.
My engine use fuzzy match and guess what is the state you want.
"""
return self.query(
state=state,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_city_and_state(
self,
city: str,
state: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by fuzzy city and state name.
My engine use fuzzy match and guess what is the state you want.
"""
return self.query(
city=city,
state=state,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_coordinates(
self,
lat: typing.Union[int, float],
lng: typing.Union[int, float],
radius: typing.Union[int, float] = 25.0,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SORT_BY_DIST,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information near a coordinates on a map.
Returns multiple results.
:param lat: center latitude.
:param lng: center longitude.
:param radius: only returns zipcode within X miles from ``lat``, ``lng``.
**中文文档**
1. 计算出在中心坐标处, 每一经度和纬度分别代表多少miles.
2. 以给定坐标为中心, 画出一个矩形, 长宽分别为半径的2倍多一点, 找到该
矩形内所有的Zipcode.
3. 对这些Zipcode计算出他们的距离, 然后按照距离远近排序。距离超过我们
限定的半径的直接丢弃.
"""
return self.query(
lat=lat, lng=lng, radius=radius,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_population(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.population.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by population range.
"""
return self.query(
population_lower=lower,
population_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_population_density(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.population_density.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by population density range.
`population density` is `population per square miles on land`
"""
return self.query(
population_density_lower=lower,
population_density_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_land_area_in_sqmi(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.land_area_in_sqmi.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by land area / sq miles range.
"""
return self.query(
land_area_in_sqmi_lower=lower,
land_area_in_sqmi_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_water_area_in_sqmi(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.water_area_in_sqmi.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by water area / sq miles range.
"""
return self.query(
water_area_in_sqmi_lower=lower,
water_area_in_sqmi_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_housing_units(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.housing_units.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by house of units.
"""
return self.query(
housing_units_lower=lower,
housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_occupied_housing_units(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.occupied_housing_units.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by occupied house of units.
"""
return self.query(
occupied_housing_units_lower=lower,
occupied_housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_median_home_value(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.median_home_value.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by median home value.
"""
return self.query(
median_home_value_lower=lower,
median_home_value_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_median_household_income(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.median_household_income.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
"""
Search zipcode information by median household income.
"""
return self.query(
median_household_income_lower=lower,
median_household_income_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def inspect_raw_data(self, zipcode: str):
sql = "SELECT * FROM {} WHERE zipcode = '{}'".format(
self.zip_klass.__tablename__,
str(zipcode).zfill(5),
)
stmt = sa.text(sql)
return dict(self.engine.execute(stmt).fetchone())
|
class SearchEngine(object):
'''
Zipcode Search Engine.
:type simple_or_comprehensive: SearchEngine.SimpleOrComprehensiveArgEnum
:param simple_or_comprehensive: default SearchEngine.SimpleOrComprehensiveArgEnum,
use the simple zipcode db. Rich Demographics, Real Estate, Employment,
Education info are not available. if
SearchEngine.SimpleOrComprehensiveArgEnum.comprehensive,
use the rich info database.
:type db_file_path: str
:param db_file_path: where you want to download the sqlite database to. This
property allows you to customize where you want to store the data file
locally. by default it is ${HOME}/.uszipcode/...
:type download_url: str
:param download_url: where you want to download the sqlite database file from.
This property allows you to upload the .sqlite file to your private file
host and download from it. In case the default download url fail.
:type engine: Engine
:param engine: a sqlachemy engine object. It allows you to use any
backend database instead of the default sqlite database.
Usage::
>>> search = SearchEngine()
>>> zipcode = search.by_zipcode("10001")
Context Manager::
>>> with SearchEngine() as search:
... for zipcode in search.by_coordinates(lat, lng, radius):
... # do what every you want
:meth:`SearchEngine.query` provides mass options to customize your query.
:attr:`SearchEngine.ses` is a ``sqlalchemy.orm.Session`` object, you can
use it for query. For example::
>>> from uszipcode import SearchEngine, SimpleZipcode, ComprehensiveZipcode
>>> search = SearchEngine()
>>> search.ses.scalar(SimpleZipcode).filter(SimpleZipcode.zipcode=="10001")
.. note::
:class:`SearchEngine` is not multi-thread safe. You should create different
instance for each thread.
'''
class SimpleOrComprehensiveArgEnum(enum.Enum):
def __init__(
self,
simple_or_comprehensive: SimpleOrComprehensiveArgEnum = SimpleOrComprehensiveArgEnum.simple,
db_file_path: typing.Union[str, None] = None,
download_url: typing.Union[str, None] = None,
engine: Engine = None,
):
pass
def _download_db_file_if_not_exists(self):
pass
def __enter__(self):
pass
def __exit__(self, *exc_info):
pass
def __del__(self):
pass
def close(self):
'''
close database connection.
'''
pass
def _get_cache_data(self):
pass
@property
def city_list(self):
'''
Return all available city name.
'''
pass
@property
def state_list(self):
'''
Return all available state name.
'''
pass
@property
def state_to_city_mapper(self):
pass
@property
def city_to_state_mapper(self):
pass
def find_state(
self,
state: str,
best_match: bool = True,
min_similarity: int = 70,
) -> typing.List[str]:
'''
Fuzzy search correct state.
:param best_match: bool, when True, only the best matched state
will be return. otherwise, will return all matching states.
'''
pass
def find_city(
self,
city: str,
state: str = None,
best_match: bool = True,
min_similarity: int = 70,
) -> typing.List[str]:
'''
Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will return. otherwise, will return all matching cities.
**中文文档**
如果给定了state, 则只在指定的state里的城市中寻找, 否则, 在全国所有的城市中寻找。
'''
pass
@staticmethod
def _resolve_sort_by(sort_by: str, flag_radius_query: bool):
'''
Result ``sort_by`` argument.
:param sort_by: str, or sqlalchemy ORM attribute.
:param flag_radius_query:
:return:
'''
pass
def query(
self,
zipcode: typing.Union[int, float] = None,
prefix: str = None,
pattern: str = None,
city: str = None,
state: str = None,
lat: typing.Union[int, float] = None,
lng: typing.Union[int, float] = None,
radius=None,
population_lower: int = None,
population_upper: int = None,
population_density_lower: int = None,
population_density_upper: int = None,
land_area_in_sqmi_lower: int = None,
land_area_in_sqmi_upper: int = None,
water_area_in_sqmi_lower: int = None,
water_area_in_sqmi_upper: int = None,
housing_units_lower: int = None,
housing_units_upper: int = None,
occupied_housing_units_lower: int = None,
occupied_housing_units_upper: int = None,
median_home_value_lower: int = None,
median_home_value_upper: int = None,
median_household_income_lower: int = None,
median_household_income_upper: int = None,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
'''
Query zipcode the simple way.
:param zipcode: int or str, find the exactly matched zipcode. Will be
automatically zero padding to 5 digits
:param prefix: str, zipcode prefix.
:param pattern: str, zipcode wildcard.
:param city: str, city name.
:param state: str, state name, two letter abbr or state full name.
:param lat: latitude.
:param lng: longitude.
:param radius: number, only returns zipcodes within a specific circle.
:param population_lower:
:param population_upper:
:param population_density_lower:
:param population_density_upper:
:param land_area_in_sqmi_lower:
:param land_area_in_sqmi_upper:
:param water_area_in_sqmi_lower:
:param water_area_in_sqmi_upper:
:param housing_units_lower:
:param housing_units_upper:
:param occupied_housing_units_lower:
:param occupied_housing_units_upper:
:param median_home_value_lower:
:param median_home_value_upper:
:param median_household_income_lower:
:param median_household_income_upper:
:param zipcode_type: str or :class`~uszipcode.model.ZipcodeType` attribute.
if None, allows to return any type of zipcode.
if specified, only return specified zipcode type.
:param sort_by: str or :class:`~uszipcode.model.Zipcode` attribute,
specified which field is used for sorting.
:param ascending: bool, True means ascending, False means descending.
:param returns: int or None, limit the number of result to returns.
:return: list of :class:`~uszipcode.model.SimpleZipcode` or
:class:`~uszipcode.model.ComprehensiveZipcode`.
'''
pass
def by_zipcode(
self,
zipcode: typing.Union[int, str],
zero_padding: bool = True,
) -> typing.Union[SimpleZipcode, ComprehensiveZipcode, None]:
'''
Search zipcode by exact 5 digits zipcode. No zero padding is needed.
:param zipcode: int or str, the zipcode will be automatically
zero padding to 5 digits.
:param zipcode_type: str or :class`~uszipcode.model.ZipcodeType` attribute.
by default, it returns any zipcode type.
:param zero_padding: bool, toggle on and off automatic zero padding.
'''
pass
def by_prefix(
self,
prefix: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by first N digits.
Returns multiple results.
'''
pass
def by_pattern(
self,
pattern: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode by wildcard.
Returns multiple results.
'''
pass
def by_city(
self,
city: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by fuzzy City name.
My engine use fuzzy match and guess what is the city you want.
'''
pass
def by_state(
self,
state: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by fuzzy State name.
My engine use fuzzy match and guess what is the state you want.
'''
pass
def by_city_and_state(
self,
city: str,
state: str,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.zipcode.name,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by fuzzy city and state name.
My engine use fuzzy match and guess what is the state you want.
'''
pass
def by_coordinates(
self,
lat: typing.Union[int, float],
lng: typing.Union[int, float],
radius: typing.Union[int, float] = 25.0,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SORT_BY_DIST,
ascending: bool = True,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information near a coordinates on a map.
Returns multiple results.
:param lat: center latitude.
:param lng: center longitude.
:param radius: only returns zipcode within X miles from ``lat``, ``lng``.
**中文文档**
1. 计算出在中心坐标处, 每一经度和纬度分别代表多少miles.
2. 以给定坐标为中心, 画出一个矩形, 长宽分别为半径的2倍多一点, 找到该
矩形内所有的Zipcode.
3. 对这些Zipcode计算出他们的距离, 然后按照距离远近排序。距离超过我们
限定的半径的直接丢弃.
'''
pass
def by_population(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.population.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by population range.
'''
pass
def by_population_density(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.population_density.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by population density range.
`population density` is `population per square miles on land`
'''
pass
def by_land_area_in_sqmi(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.land_area_in_sqmi.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by land area / sq miles range.
'''
pass
def by_water_area_in_sqmi(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.water_area_in_sqmi.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by water area / sq miles range.
'''
pass
def by_housing_units(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.housing_units.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by house of units.
'''
pass
def by_occupied_housing_units(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.occupied_housing_units.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by occupied house of units.
'''
pass
def by_median_home_value(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.median_home_value.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by median home value.
'''
pass
def by_median_household_income(
self,
lower: int = -1,
upper: int = 2 ** 31,
zipcode_type: ZipcodeTypeEnum = ZipcodeTypeEnum.Standard,
sort_by: str = SimpleZipcode.median_household_income.name,
ascending: bool = False,
returns: int = DEFAULT_LIMIT,
):
'''
Search zipcode information by median household income.
'''
pass
def inspect_raw_data(self, zipcode: str):
pass
| 38 | 23 | 26 | 2 | 20 | 5 | 4 | 0.33 | 1 | 13 | 4 | 0 | 30 | 8 | 31 | 31 | 936 | 113 | 634 | 253 | 436 | 207 | 297 | 87 | 264 | 47 | 1 | 4 | 116 |
148,400 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/tests/test_symmetric.py
|
test_symmetric.TestSymmetricCipher
|
class TestSymmetricCipher(BaseTestCipher):
    # Shared cipher fixture consumed by the tests inherited from BaseTestCipher.
    cipher = SymmetricCipher(password="MyPassword")
    # NOTE(review): the statements below execute at class-definition time,
    # not inside a test method — presumably they were meant to live in a
    # ``test_set_encrypt_chunk_size`` method; confirm against the original.
    # chunk size 0 must be rejected; a large positive value is accepted.
    with pytest.raises(ValueError):
        cipher.set_encrypt_chunk_size(0)
    cipher.set_encrypt_chunk_size(10 * 1024 * 1024)
    # touch the metadata property for coverage
    _ = cipher.metadata
    def test_decrypt_with_password(self):
        # Text encrypted under one password must not decrypt under another:
        # a wrong password raises PasswordError.
        encrypted_text = self.cipher.encrypt_text("Hello World")
        cipher = SymmetricCipher(password="MyPassword")
        cipher.set_password(password="AnotherPassword")
        with pytest.raises(PasswordError):
            cipher.decrypt_text(encrypted_text)
|
class TestSymmetricCipher(BaseTestCipher):
def test_decrypt_with_password(self):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 2 | 0 | 1 | 0 | 1 | 5 | 14 | 2 | 12 | 6 | 10 | 0 | 12 | 6 | 10 | 1 | 1 | 1 | 1 |
148,401 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/tests/test_asymmetric.py
|
test_asymmetric.TestAsymmetricCipher
|
class TestAsymmetricCipher:
    def test_encrypt_decrypt_data(self):
        """Round-trip: A encrypts and signs for B; B verifies and decrypts."""
        plaintext = "Turn right at blue tree".encode("utf-8")
        a_pub, a_priv = AsymmetricCipher.new_keys()
        b_pub, b_priv = AsymmetricCipher.new_keys()
        alice = AsymmetricCipher(a_pub, a_priv, b_pub)
        bob = AsymmetricCipher(b_pub, b_priv, a_pub)
        ciphertext = alice.encrypt(plaintext)
        signature = alice.sign
        recovered = bob.decrypt(ciphertext, signature=signature)
        assert plaintext == recovered
        assert plaintext != ciphertext
|
class TestAsymmetricCipher:
def test_encrypt_decrypt_data(self):
pass
| 2 | 0 | 14 | 3 | 11 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 15 | 3 | 12 | 10 | 10 | 0 | 12 | 10 | 10 | 1 | 0 | 0 | 1 |
148,402 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/windtalker/vendor/hashes.py
|
windtalker.vendor.hashes.Hashes
|
class Hashes:
"""
A hashlib wrapper class allow you to use one line to do hash as you wish.
"""
def __init__(
self,
algo: HashAlgoEnum = HashAlgoEnum.md5,
hexdigest: bool = True,
):
self.algo = getattr(hashlib, algo.value)
self.hexdigest: bool = hexdigest
def use_md5(self) -> "Hashes":
"""
Use md5 hash algorithm.
"""
self.algo = getattr(hashlib, HashAlgoEnum.md5.value)
return self
def use_sha1(self) -> "Hashes":
"""
Use sha1 hash algorithm.
"""
self.algo = getattr(hashlib, HashAlgoEnum.sha1.value)
return self
def use_sha224(self) -> "Hashes":
"""
Use sha224 hash algorithm.
"""
self.algo = getattr(hashlib, HashAlgoEnum.sha224.value)
return self
def use_sha256(self) -> "Hashes":
"""
Use sha256 hash algorithm.
"""
self.algo = getattr(hashlib, HashAlgoEnum.sha256.value)
return self
def use_sha384(self) -> "Hashes":
"""
Use sha384 hash algorithm.
"""
self.algo = getattr(hashlib, HashAlgoEnum.sha384.value)
return self
def use_sha512(self) -> "Hashes":
"""
Use sha512 hash algorithm.
"""
self.algo = getattr(hashlib, HashAlgoEnum.sha512.value)
return self
def use_hexdigesst(self) -> "Hashes":
"""
Return hash in hex string.
"""
self.hexdigest = True
return self
def use_bytesdigest(self) -> "Hashes":
"""
Return hash in bytes.
"""
self.hexdigest = False
return self
def _construct(self, algo: T.Optional[HashAlgoEnum] = None):
if algo is None:
return self.algo()
else:
return getattr(hashlib, algo.value)()
def _digest(self, m, hexdigest: T.Optional[bool]) -> T.Union[str, bytes]:
if hexdigest is None:
if self.hexdigest:
return m.hexdigest()
else:
return m.digest()
else:
if hexdigest:
return m.hexdigest()
else:
return m.digest()
def of_str(
self,
s: str,
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
"""
Return hash value of a string.
"""
m = self._construct(algo)
m.update(s.encode("utf-8"))
return self._digest(m, hexdigest)
def of_bytes(
self,
b: bytes,
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
"""
Return hash value of a bytes.
"""
m = self._construct(algo)
m.update(b)
return self._digest(m, hexdigest)
def of_str_or_bytes(
self,
s_or_b: T.Union[bytes, str],
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
"""
Return hash value of a bytes or string.
"""
if isinstance(s_or_b, str):
return self.of_str(s_or_b, algo, hexdigest)
else:
return self.of_bytes(s_or_b, algo, hexdigest)
def of_file(
    self,
    abspath: T.Union[str, Path, T.Any],
    nbytes: int = 0,
    chunk_size: int = 1024,
    algo: T.Optional[HashAlgoEnum] = None,
    hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
    """Hash a file's content, or only its first ``nbytes`` bytes when nbytes > 0."""
    # Open in binary mode and delegate the chunked read loop.
    with Path(abspath).open("rb") as fileobj:
        return self.of_file_object(
            fileobj,
            nbytes=nbytes,
            chunk_size=chunk_size,
            algo=algo,
            hexdigest=hexdigest,
        )
def of_file_object(
    self,
    f,
    nbytes: int = 0,
    chunk_size: int = 4096,
    algo: T.Optional[HashAlgoEnum] = None,
    hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
    """
    Return the hash of a binary file object's content.

    :param f: file-like object opened in binary mode.
    :param nbytes: if > 0, hash only the first ``nbytes`` bytes;
        0 means hash the entire content.
    :param chunk_size: number of bytes read per iteration.
    :param algo: optional per-call algorithm override.
    :param hexdigest: optional per-call override of hex vs. bytes output.
    :raises ValueError: if ``nbytes`` is negative or ``chunk_size`` < 1.
    """
    if nbytes < 0:
        # BUGFIX: this message previously blamed chunk_size for a bad nbytes.
        raise ValueError("nbytes cannot be smaller than 0")
    if chunk_size < 1:
        raise ValueError("chunk_size cannot be smaller than 1")
    if (nbytes > 0) and (nbytes < chunk_size):
        # Never read past the requested prefix.
        chunk_size = nbytes
    m = self._construct(algo)
    if nbytes:  # hash only the first nbytes bytes
        have_reads = 0
        while True:
            have_reads += chunk_size
            if have_reads > nbytes:
                # Final partial chunk: read just the remaining bytes.
                n = nbytes - (have_reads - chunk_size)
                if n:
                    data = f.read(n)
                    m.update(data)
                break
            else:
                data = f.read(chunk_size)
                m.update(data)
    else:  # hash the entire content
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            m.update(data)
    return self._digest(m, hexdigest)
def of_folder(
    self,
    abspath: T.Union[str, Path, T.Any],
    algo: T.Optional[HashAlgoEnum] = None,
    hexdigest: T.Optional[bool] = None,
) -> str:
    """
    Hash a directory: concatenate the per-file hashes (files visited
    recursively, in sorted path order) and hash the resulting string.

    NOTE(review): with hexdigest=False the per-file results are bytes and
    ``"".join`` below would raise -- presumably only hex mode is intended
    here; confirm with callers.
    """
    root = Path(abspath)
    if not root.is_dir():
        raise NotADirectoryError(f"{root} is not a folder!")
    file_hashes = [
        self.of_file(child, algo=algo, hexdigest=hexdigest)
        for child in sorted(root.glob("**/*"), key=lambda x: str(x))
        if child.is_file()
    ]
    return self.of_str(
        s="".join(file_hashes),
        algo=algo,
        hexdigest=hexdigest,
    )
def of_paths(
    self,
    paths: T.List[T.Union[str, Path, T.Any]],
    algo: T.Optional[HashAlgoEnum] = None,
    hexdigest: T.Optional[bool] = None,
) -> str:
    """
    Hash a list of files and folders by concatenating their individual
    hash values and hashing the resulting string.
    """
    parts = []
    for raw in paths:
        p = Path(raw)
        if p.is_dir():
            parts.append(self.of_folder(p, algo=algo, hexdigest=hexdigest))
        elif p.is_file():
            parts.append(self.of_file(p, algo=algo, hexdigest=hexdigest))
        else:  # pragma: no cover
            # Silently skip paths that are neither files nor directories.
            pass
    return self.of_str(
        s="".join(parts),
        algo=algo,
        hexdigest=hexdigest,
    )
|
class Hashes:
'''
A hashlib wrapper class allow you to use one line to do hash as you wish.
'''
def __init__(
self,
algo: HashAlgoEnum = HashAlgoEnum.md5,
hexdigest: bool = True,
):
pass
def use_md5(self) -> "Hashes":
'''
Use md5 hash algorithm.
'''
pass
def use_sha1(self) -> "Hashes":
'''
Use sha1 hash algorithm.
'''
pass
def use_sha224(self) -> "Hashes":
'''
Use sha224 hash algorithm.
'''
pass
def use_sha256(self) -> "Hashes":
'''
Use sha256 hash algorithm.
'''
pass
def use_sha384(self) -> "Hashes":
'''
Use sha384 hash algorithm.
'''
pass
def use_sha512(self) -> "Hashes":
'''
Use sha512 hash algorithm.
'''
pass
def use_hexdigesst(self) -> "Hashes":
'''
Return hash in hex string.
'''
pass
def use_bytesdigest(self) -> "Hashes":
'''
Return hash in bytes.
'''
pass
def _construct(self, algo: T.Optional[HashAlgoEnum] = None):
pass
def _digest(self, m, hexdigest: T.Optional[bool]) -> T.Union[str, bytes]:
pass
def of_str(
self,
s: str,
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
'''
Return hash value of a string.
'''
pass
def of_bytes(
self,
b: bytes,
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
'''
Return hash value of a bytes.
'''
pass
def of_str_or_bytes(
self,
s_or_b: T.Union[bytes, str],
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
'''
Return hash value of a bytes or string.
'''
pass
def of_file(
self,
abspath: T.Union[str, Path, T.Any],
nbytes: int = 0,
chunk_size: int = 1024,
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
'''
Return hash value of a file, or only a piece of a file
'''
pass
def of_file_object(
self,
f,
nbytes: int = 0,
chunk_size: int = 4096,
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> T.Union[str, bytes]:
pass
def of_folder(
self,
abspath: T.Union[str, Path, T.Any],
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> str:
'''
Return hash value of a folder. It is based on the concatenation of
the hash values of all files in the folder. The order of the files
are sorted by their paths.
'''
pass
def of_paths(
self,
paths: T.List[T.Union[str, Path, T.Any]],
algo: T.Optional[HashAlgoEnum] = None,
hexdigest: T.Optional[bool] = None,
) -> str:
'''
Return hash value of a list of paths. It is based on the concatenation of
the hash values of all files and folders.
'''
pass
| 19 | 15 | 12 | 0 | 9 | 3 | 2 | 0.31 | 0 | 10 | 1 | 0 | 18 | 2 | 18 | 18 | 234 | 20 | 166 | 77 | 104 | 51 | 100 | 33 | 81 | 10 | 0 | 4 | 38 |
148,403 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/windtalker/vendor/hashes.py
|
windtalker.vendor.hashes.HashAlgoEnum
|
class HashAlgoEnum(str, enum.Enum):
    """Supported hash algorithm names; each value doubles as the attribute
    name looked up on the :mod:`hashlib` module."""
    md5 = "md5"
    sha1 = "sha1"
    sha224 = "sha224"
    sha256 = "sha256"
    sha384 = "sha384"
    sha512 = "sha512"
|
class HashAlgoEnum(str, enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 7 | 0 | 7 | 7 | 6 | 0 | 7 | 7 | 6 | 0 | 4 | 0 | 0 |
148,404 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/windtalker/tests/helper.py
|
windtalker.tests.helper.BaseTestCipher
|
class BaseTestCipher:
    """Reusable round-trip test suite: subclasses set ``cipher`` to a
    concrete cipher instance and inherit every test below."""

    # Cipher under test; assigned by subclasses.
    cipher = None

    @property
    def c(self):
        # Shorthand for the cipher under test.
        return self.cipher

    def test_encrypt_and_decrypt_binary_and_text(self):
        # Round-trip a text message and its UTF-8 encoded form.
        s = "Turn right at blue tree"
        assert self.c.decrypt_text(self.c.encrypt_text(s)) == s
        b = s.encode("utf-8")
        assert self.c.decrypt_binary(self.c.encrypt_binary(b)) == b

    def test_encrypt_and_decrypt_file(self):
        # Encrypt a fixture file, decrypt it back, and check the round trip
        # preserves content while the ciphertext actually differs.
        original_text = p_original.read_text()
        self.c.encrypt_file(
            p_original,
            p_encrypted,
            overwrite=True,
            enable_verbose=False,
        )
        encrypted_text = p_encrypted.read_text()
        self.c.decrypt_file(
            p_encrypted,
            p_decrypted,
            overwrite=True,
            enable_verbose=False,
        )
        decrypted_text = p_decrypted.read_text()
        assert original_text == decrypted_text
        assert original_text != encrypted_text

    def test_encrypt_and_decrypt_dir(self):
        # Encrypt then decrypt a whole fixture directory and compare each
        # file pairwise.
        # NOTE(review): assumes select_file() yields both trees in matching
        # order -- confirm against the fixture helper.
        self.c.encrypt_dir(
            dir_original,
            dir_encrypted,
            overwrite=True,
            enable_verbose=False,
        )
        self.c.decrypt_dir(
            dir_encrypted,
            dir_decrypted,
            overwrite=True,
            enable_verbose=False,
        )
        for p1, p2 in zip(
            dir_original.select_file(recursive=True),
            dir_decrypted.select_file(recursive=True),
        ):
            assert p1.read_bytes() == p2.read_bytes()
|
class BaseTestCipher:
@property
def c(self):
pass
def test_encrypt_and_decrypt_binary_and_text(self):
pass
def test_encrypt_and_decrypt_file(self):
pass
def test_encrypt_and_decrypt_dir(self):
pass
| 6 | 0 | 12 | 1 | 11 | 0 | 1 | 0 | 0 | 1 | 0 | 2 | 4 | 0 | 4 | 4 | 54 | 8 | 46 | 13 | 40 | 0 | 22 | 12 | 17 | 2 | 0 | 1 | 5 |
148,405 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/windtalker/symmetric.py
|
windtalker.symmetric.SymmetricCipher
|
class SymmetricCipher(BaseCipher):
    """
    A symmetric encryption utility that makes it easy to encrypt/decrypt
    text, files, and even a whole directory.

    :param password: the secret password used to encrypt all your messages.
        If you prefer not to put it in code, leave it empty and you will be
        asked to enter it manually.
    """

    _encrypt_chunk_size = 1024 * 1024  # 1 MB
    _decrypt_chunk_size = 1398200  # 1.398 MB
    # Symmetric encryption breaks big files into fixed-size chunks, encrypts
    # them one by one, and concatenates the results at the end. These two
    # attributes hold the plaintext and ciphertext chunk sizes respectively.

    def __init__(self, password: T.Optional[str] = None):
        if password:
            fernet_key = self.any_text_to_fernet_key(password)
            self.fernet = Fernet(fernet_key)  # type: Fernet
        else:  # pragma: no cover
            # No password supplied: fall back to the on-disk password file,
            # or prompt the user interactively.
            if path_windtalker.exists():
                self.set_password(read_windtalker_password())
            else:
                self.input_password()

    def any_text_to_fernet_key(self, text: str) -> bytes:
        """
        Derive a Fernet key from arbitrary text
        (md5 hex digest of the text, base64-encoded).
        """
        md5 = hashes.of_str(text)
        fernet_key = base64.b64encode(md5.encode("utf-8"))
        return fernet_key

    def input_password(self):  # pragma: no cover
        """
        Manually enter a password for encryption on keyboard.
        """
        password = input("Please enter your secret key (case sensitive): ")
        self.set_password(password)

    def set_password(self, password: str):
        """
        Set a new password for encryption (re-initializes the Fernet cipher).
        """
        self.__init__(password=password)

    def set_encrypt_chunk_size(self, size: int):
        """
        Set the plaintext chunk size (must be between 1MB and 100MB); the
        ciphertext chunk size is derived by encrypting a probe of that size.

        :raises ValueError: if ``size`` is outside the allowed range.
        """
        if 1024 * 1024 < size < 100 * 1024 * 1024:
            self._encrypt_chunk_size = size
            self._decrypt_chunk_size = len(self.encrypt(b"x" * size))
        else:
            raise ValueError(
                f"Cannot set encrypt chunk size = {size}, "
                f"encrypt chunk size has to be between 1MB and 100MB"
            )

    @property
    def metadata(self) -> dict:
        """Chunk-size settings needed to decrypt data produced by this cipher."""
        return {
            "_encrypt_chunk_size": self._encrypt_chunk_size,
            "_decrypt_chunk_size": self._decrypt_chunk_size,
        }

    def encrypt(self, binary: bytes, *args, **kwargs) -> bytes:
        """
        Encrypt binary data.
        """
        return self.fernet.encrypt(binary)

    def decrypt(self, binary: bytes, *args, **kwargs) -> bytes:
        """
        Decrypt binary data.

        :raises PasswordError: when the token cannot be decrypted
            (wrong password or corrupted data).
        """
        # BUGFIX: narrowed the bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and chained the original cause
        # so the underlying failure stays visible in tracebacks.
        try:
            return self.fernet.decrypt(binary)
        except Exception as e:
            raise PasswordError("Ops, wrong magic word!") from e
|
class SymmetricCipher(BaseCipher):
'''
A symmetric encryption algorithm utility class helps you easily
encrypt/decrypt text, files and even a directory.
:param password: The secret password you use to encrypt all your message.
If you feel uncomfortable to put that in your code, you can leave it
empty. The system will ask you manually enter that later.
**中文文档**
对称加密器。
'''
def __init__(self, password: T.Optional[str] = None):
pass
def any_text_to_fernet_key(self, text: str) -> bytes:
'''
Convert any text to a fernet key for encryption.
'''
pass
def input_password(self):
'''
Manually enter a password for encryption on keyboard.
'''
pass
def set_password(self, password: str):
'''
Set a new password for encryption.
'''
pass
def set_encrypt_chunk_size(self, size: int):
pass
@property
def metadata(self) -> dict:
pass
def encrypt(self, binary: bytes, *args, **kwargs) -> bytes:
'''
Encrypt binary data.
'''
pass
def decrypt(self, binary: bytes, *args, **kwargs) -> bytes:
'''
Decrypt binary data.
'''
pass
| 10 | 6 | 7 | 0 | 5 | 2 | 2 | 0.77 | 1 | 6 | 1 | 0 | 8 | 1 | 8 | 21 | 83 | 12 | 43 | 17 | 33 | 33 | 33 | 16 | 24 | 3 | 1 | 2 | 12 |
148,406 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/windtalker/exc.py
|
windtalker.exc.SignatureError
|
class SignatureError(Exception):
"""asymmetric encrypt wrong signature error.
"""
|
class SignatureError(Exception):
'''asymmetric encrypt wrong signature error.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 0 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 3 | 0 | 0 |
148,407 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/tests/test_cipher.py
|
test_cipher.MyCipher
|
class MyCipher(BaseCipher):
    """A toy cipher for tests: base64 plus a trailing sentinel character."""

    def encrypt(self, binary, *args, **kwargs):
        # base64-encode, append an "X" marker, and re-encode as bytes.
        encoded = base64.b64encode(binary).decode("utf-8")
        return (encoded + "X").encode("utf-8")

    def decrypt(self, binary, *args, **kwargs):
        # Strip the trailing marker, then reverse the base64 encoding.
        text = binary.decode("utf-8")
        return base64.b64decode(text[:-1].encode("utf-8"))
|
class MyCipher(BaseCipher):
def encrypt(self, binary, *args, **kwargs):
pass
def decrypt(self, binary, *args, **kwargs):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 15 | 6 | 1 | 5 | 3 | 2 | 0 | 5 | 3 | 2 | 1 | 1 | 0 | 2 |
148,408 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/tests/test_cipher.py
|
test_cipher.TestBaseCipher
|
class TestBaseCipher(BaseTestCipher):
    """Concrete run of the shared cipher test suite against the
    module-level cipher instance ``c``."""
    cipher = c
|
class TestBaseCipher(BaseTestCipher):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
148,409 |
MacHu-GWU/windtalker-project
|
MacHu-GWU_windtalker-project/windtalker/exc.py
|
windtalker.exc.PasswordError
|
class PasswordError(Exception):
"""symmetric encrypt wrong password error.
"""
|
class PasswordError(Exception):
'''symmetric encrypt wrong password error.
'''
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 0 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 3 | 0 | 0 |
148,410 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidIndicatorParameterValueError
|
class InvalidIndicatorParameterValueError(DataError):
sqlstate = '22010'
|
class InvalidIndicatorParameterValueError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,411 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidJsonTextError
|
class InvalidJsonTextError(DataError):
sqlstate = '22032'
|
class InvalidJsonTextError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,412 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidLocatorSpecificationError
|
class InvalidLocatorSpecificationError(LocatorError):
sqlstate = '0F001'
|
class InvalidLocatorSpecificationError(LocatorError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,413 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.FDWNoSchemasError
|
class FDWNoSchemasError(FDWError):
sqlstate = 'HV00P'
|
class FDWNoSchemasError(FDWError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,414 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UndefinedFileError
|
class UndefinedFileError(PostgresSystemError):
sqlstate = '58P01'
|
class UndefinedFileError(PostgresSystemError):
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,415 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.IdleInTransactionSessionTimeoutError
|
class IdleInTransactionSessionTimeoutError(InvalidTransactionStateError):
sqlstate = '25P03'
|
class IdleInTransactionSessionTimeoutError(InvalidTransactionStateError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,416 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UndefinedParameterError
|
class UndefinedParameterError(SyntaxOrAccessError):
sqlstate = '42P02'
|
class UndefinedParameterError(SyntaxOrAccessError):
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,417 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.TrimError
|
class TrimError(DataError):
sqlstate = '22027'
|
class TrimError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,418 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.TriggeredDataChangeViolationError
|
class TriggeredDataChangeViolationError(_base.PostgresError):
sqlstate = '27000'
|
class TriggeredDataChangeViolationError(_base.PostgresError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 5 | 0 | 0 |
148,419 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UndefinedTableError
|
class UndefinedTableError(SyntaxOrAccessError):
sqlstate = '42P01'
|
class UndefinedTableError(SyntaxOrAccessError):
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,420 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UniqueViolationError
|
class UniqueViolationError(IntegrityConstraintViolationError):
sqlstate = '23505'
|
class UniqueViolationError(IntegrityConstraintViolationError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,421 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UnsafeNewEnumValueUsageError
|
class UnsafeNewEnumValueUsageError(ObjectNotInPrerequisiteStateError):
sqlstate = '55P04'
|
class UnsafeNewEnumValueUsageError(ObjectNotInPrerequisiteStateError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,422 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UnterminatedCStringError
|
class UnterminatedCStringError(DataError):
sqlstate = '22024'
|
class UnterminatedCStringError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,423 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UntranslatableCharacterError
|
class UntranslatableCharacterError(DataError):
sqlstate = '22P05'
|
class UntranslatableCharacterError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,424 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.TriggeredActionError
|
class TriggeredActionError(_base.PostgresError):
sqlstate = '09000'
|
class TriggeredActionError(_base.PostgresError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 5 | 0 | 0 |
148,425 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.FunctionExecutedNoReturnStatementError
|
class FunctionExecutedNoReturnStatementError(SQLRoutineError):
sqlstate = '2F005'
|
class FunctionExecutedNoReturnStatementError(SQLRoutineError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,426 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UndefinedColumnError
|
class UndefinedColumnError(SyntaxOrAccessError):
sqlstate = '42703'
|
class UndefinedColumnError(SyntaxOrAccessError):
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,427 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UndefinedObjectError
|
class UndefinedObjectError(SyntaxOrAccessError):
sqlstate = '42704'
|
class UndefinedObjectError(SyntaxOrAccessError):
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,428 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidFunctionDefinitionError
|
class InvalidFunctionDefinitionError(SyntaxOrAccessError):
sqlstate = '42P13'
|
class InvalidFunctionDefinitionError(SyntaxOrAccessError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,429 |
MagicStack/asyncpg
|
MagicStack_asyncpg/tests/test_transaction.py
|
tests.test_transaction.TestTransaction
|
class TestTransaction(tb.ConnectedTestCase):
async def test_transaction_regular(self):
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
tr = self.con.transaction()
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
with self.assertRaises(ZeroDivisionError):
async with tr as with_tr:
self.assertIs(self.con._top_xact, tr)
self.assertTrue(self.con.is_in_transaction())
# We don't return the transaction object from __aenter__,
# to make it harder for people to use '.rollback()' and
# '.commit()' from within an 'async with' block.
self.assertIsNone(with_tr)
await self.con.execute('''
CREATE TABLE mytab (a int);
''')
1 / 0
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
with self.assertRaisesRegex(asyncpg.PostgresError,
'"mytab" does not exist'):
await self.con.prepare('''
SELECT * FROM mytab
''')
async def test_transaction_nested(self):
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
tr = self.con.transaction()
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
with self.assertRaises(ZeroDivisionError):
async with tr:
self.assertIs(self.con._top_xact, tr)
self.assertTrue(self.con.is_in_transaction())
await self.con.execute('''
CREATE TABLE mytab (a int);
''')
async with self.con.transaction():
self.assertIs(self.con._top_xact, tr)
self.assertTrue(self.con.is_in_transaction())
await self.con.execute('''
INSERT INTO mytab (a) VALUES (1), (2);
''')
self.assertIs(self.con._top_xact, tr)
self.assertTrue(self.con.is_in_transaction())
with self.assertRaises(ZeroDivisionError):
in_tr = self.con.transaction()
async with in_tr:
self.assertIs(self.con._top_xact, tr)
self.assertTrue(self.con.is_in_transaction())
await self.con.execute('''
INSERT INTO mytab (a) VALUES (3), (4);
''')
1 / 0
st = await self.con.prepare('SELECT * FROM mytab;')
recs = []
async for rec in st.cursor():
recs.append(rec)
self.assertEqual(len(recs), 2)
self.assertEqual(recs[0][0], 1)
self.assertEqual(recs[1][0], 2)
self.assertIs(self.con._top_xact, tr)
self.assertTrue(self.con.is_in_transaction())
1 / 0
self.assertIs(self.con._top_xact, None)
self.assertFalse(self.con.is_in_transaction())
with self.assertRaisesRegex(asyncpg.PostgresError,
'"mytab" does not exist'):
await self.con.prepare('''
SELECT * FROM mytab
''')
async def test_transaction_interface_errors(self):
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
tr = self.con.transaction(readonly=True, isolation='serializable')
with self.assertRaisesRegex(asyncpg.InterfaceError,
'cannot start; .* already started'):
async with tr:
await tr.start()
self.assertTrue(repr(tr).startswith(
'<asyncpg.Transaction state:rolledback serializable readonly'))
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
with self.assertRaisesRegex(asyncpg.InterfaceError,
'cannot start; .* already rolled back'):
async with tr:
pass
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
tr = self.con.transaction()
with self.assertRaisesRegex(asyncpg.InterfaceError,
'cannot manually commit.*async with'):
async with tr:
await tr.commit()
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
tr = self.con.transaction()
with self.assertRaisesRegex(asyncpg.InterfaceError,
'cannot manually rollback.*async with'):
async with tr:
await tr.rollback()
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
tr = self.con.transaction()
with self.assertRaisesRegex(asyncpg.InterfaceError,
'cannot enter context:.*async with'):
async with tr:
async with tr:
pass
async def test_transaction_within_manual_transaction(self):
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
await self.con.execute('BEGIN')
tr = self.con.transaction()
self.assertIsNone(self.con._top_xact)
self.assertTrue(self.con.is_in_transaction())
with self.assertRaisesRegex(asyncpg.InterfaceError,
'cannot use Connection.transaction'):
await tr.start()
with self.assertLoopErrorHandlerCalled(
'Resetting connection with an active transaction'):
await self.con.reset()
self.assertIsNone(self.con._top_xact)
self.assertFalse(self.con.is_in_transaction())
async def test_isolation_level(self):
await self.con.reset()
default_isolation = await self.con.fetchval(
'SHOW default_transaction_isolation'
)
isolation_levels = {
None: default_isolation,
'read_committed': 'read committed',
'read_uncommitted': 'read uncommitted',
'repeatable_read': 'repeatable read',
'serializable': 'serializable',
}
set_sql = 'SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL '
get_sql = 'SHOW TRANSACTION ISOLATION LEVEL'
for tx_level in isolation_levels:
for conn_level in isolation_levels:
with self.subTest(conn=conn_level, tx=tx_level):
if conn_level:
await self.con.execute(
set_sql + isolation_levels[conn_level]
)
level = await self.con.fetchval(get_sql)
self.assertEqual(level, isolation_levels[conn_level])
async with self.con.transaction(isolation=tx_level):
level = await self.con.fetchval(get_sql)
self.assertEqual(
level,
isolation_levels[tx_level or conn_level],
)
await self.con.reset()
async def test_nested_isolation_level(self):
set_sql = 'SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL '
isolation_levels = {
'read_committed': 'read committed',
'read_uncommitted': 'read uncommitted',
'repeatable_read': 'repeatable read',
'serializable': 'serializable',
}
for inner in [None] + list(isolation_levels):
for outer, outer_sql_level in isolation_levels.items():
for implicit in [False, True]:
with self.subTest(
implicit=implicit, outer=outer, inner=inner,
):
if implicit:
await self.con.execute(set_sql + outer_sql_level)
outer_level = None
else:
outer_level = outer
async with self.con.transaction(isolation=outer_level):
if inner and outer != inner:
with self.assertRaisesRegex(
asyncpg.InterfaceError,
'current {!r} != outer {!r}'.format(
inner, outer
)
):
async with self.con.transaction(
isolation=inner,
):
pass
else:
async with self.con.transaction(
isolation=inner,
):
pass
|
class TestTransaction(tb.ConnectedTestCase):
async def test_transaction_regular(self):
pass
async def test_transaction_nested(self):
pass
async def test_transaction_interface_errors(self):
pass
async def test_transaction_within_manual_transaction(self):
pass
async def test_isolation_level(self):
pass
async def test_nested_isolation_level(self):
pass
| 7 | 0 | 39 | 7 | 31 | 1 | 3 | 0.02 | 1 | 2 | 0 | 0 | 6 | 0 | 6 | 6 | 238 | 46 | 189 | 29 | 182 | 3 | 136 | 28 | 129 | 6 | 1 | 8 | 15 |
148,430 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.GroupingError
|
class GroupingError(SyntaxOrAccessError):
sqlstate = '42803'
|
class GroupingError(SyntaxOrAccessError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,431 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.GeneratedAlwaysError
|
class GeneratedAlwaysError(SyntaxOrAccessError):
sqlstate = '428C9'
|
class GeneratedAlwaysError(SyntaxOrAccessError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,432 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidCursorStateError
|
class InvalidCursorStateError(_base.PostgresError):
sqlstate = '24000'
|
class InvalidCursorStateError(_base.PostgresError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 5 | 0 | 0 |
148,433 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidDatabaseDefinitionError
|
class InvalidDatabaseDefinitionError(SyntaxOrAccessError):
sqlstate = '42P12'
|
class InvalidDatabaseDefinitionError(SyntaxOrAccessError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,434 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidDatetimeFormatError
|
class InvalidDatetimeFormatError(DataError):
sqlstate = '22007'
|
class InvalidDatetimeFormatError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,435 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidEscapeCharacterError
|
class InvalidEscapeCharacterError(DataError):
sqlstate = '22019'
|
class InvalidEscapeCharacterError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,436 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidEscapeOctetError
|
class InvalidEscapeOctetError(DataError):
sqlstate = '2200D'
|
class InvalidEscapeOctetError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,437 |
MagicStack/asyncpg
|
MagicStack_asyncpg/tests/test_utils.py
|
tests.test_utils.TestUtils
|
class TestUtils(tb.ConnectedTestCase):
async def test_mogrify_simple(self):
cases = [
('timestamp',
datetime.datetime(2016, 10, 10),
"SELECT '2016-10-10 00:00:00'::timestamp"),
('int[]',
[[1, 2], [3, 4]],
"SELECT '{{1,2},{3,4}}'::int[]"),
]
for typename, data, expected in cases:
with self.subTest(value=data, type=typename):
mogrified = await utils._mogrify(
self.con, 'SELECT $1::{}'.format(typename), [data])
self.assertEqual(mogrified, expected)
async def test_mogrify_multiple(self):
mogrified = await utils._mogrify(
self.con, 'SELECT $1::int, $2::int[]',
[1, [2, 3, 4, 5]])
expected = "SELECT '1'::int, '{2,3,4,5}'::int[]"
self.assertEqual(mogrified, expected)
|
class TestUtils(tb.ConnectedTestCase):
async def test_mogrify_simple(self):
pass
async def test_mogrify_multiple(self):
pass
| 3 | 0 | 11 | 1 | 10 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 2 | 24 | 3 | 21 | 8 | 18 | 0 | 11 | 8 | 8 | 2 | 1 | 2 | 3 |
148,438 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidEscapeSequenceError
|
class InvalidEscapeSequenceError(DataError):
sqlstate = '22025'
|
class InvalidEscapeSequenceError(DataError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,439 |
MagicStack/asyncpg
|
MagicStack_asyncpg/tests/test_types.py
|
tests.test_types.TestTypes
|
class TestTypes(tb.TestCase):
def test_range_issubset(self):
subs = [
Range(empty=True),
Range(lower=1, upper=5, lower_inc=True, upper_inc=False),
Range(lower=1, upper=5, lower_inc=True, upper_inc=True),
Range(lower=1, upper=5, lower_inc=False, upper_inc=True),
Range(lower=1, upper=5, lower_inc=False, upper_inc=False),
Range(lower=-5, upper=10),
Range(lower=2, upper=3),
Range(lower=1, upper=None),
Range(lower=None, upper=None)
]
sups = [
Range(empty=True),
Range(lower=1, upper=5, lower_inc=True, upper_inc=False),
Range(lower=1, upper=5, lower_inc=True, upper_inc=True),
Range(lower=1, upper=5, lower_inc=False, upper_inc=True),
Range(lower=1, upper=5, lower_inc=False, upper_inc=False),
Range(lower=None, upper=None)
]
# Each row is 1 subs with all sups
results = [
True, True, True, True, True, True,
False, True, True, False, False, True,
False, False, True, False, False, True,
False, False, True, True, False, True,
False, True, True, True, True, True,
False, False, False, False, False, True,
False, True, True, True, True, True,
False, False, False, False, False, True,
False, False, False, False, False, True
]
for (sub, sup), res in zip(product(subs, sups), results):
self.assertIs(
sub.issubset(sup), res, "Sub:{}, Sup:{}".format(sub, sup)
)
self.assertIs(
sup.issuperset(sub), res, "Sub:{}, Sup:{}".format(sub, sup)
)
|
class TestTypes(tb.TestCase):
def test_range_issubset(self):
pass
| 2 | 0 | 42 | 3 | 38 | 1 | 2 | 0.03 | 1 | 3 | 1 | 0 | 1 | 0 | 1 | 1 | 44 | 4 | 39 | 6 | 37 | 1 | 8 | 6 | 6 | 2 | 1 | 1 | 2 |
148,440 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.WithCheckOptionViolationError
|
class WithCheckOptionViolationError(_base.PostgresError):
sqlstate = '44000'
|
class WithCheckOptionViolationError(_base.PostgresError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 5 | 0 | 0 |
148,441 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidForeignKeyError
|
class InvalidForeignKeyError(SyntaxOrAccessError):
sqlstate = '42830'
|
class InvalidForeignKeyError(SyntaxOrAccessError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,442 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.HeldCursorRequiresSameIsolationLevelError
|
class HeldCursorRequiresSameIsolationLevelError(InvalidTransactionStateError):
sqlstate = '25008'
|
class HeldCursorRequiresSameIsolationLevelError(InvalidTransactionStateError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,443 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidGrantOperationError
|
class InvalidGrantOperationError(InvalidGrantorError):
sqlstate = '0LP01'
|
class InvalidGrantOperationError(InvalidGrantorError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,444 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.InvalidGrantorError
|
class InvalidGrantorError(_base.PostgresError):
sqlstate = '0L000'
|
class InvalidGrantorError(_base.PostgresError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 5 | 0 | 0 |
148,445 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.UndefinedFunctionError
|
class UndefinedFunctionError(SyntaxOrAccessError):
sqlstate = '42883'
|
class UndefinedFunctionError(SyntaxOrAccessError):
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,446 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.WindowingError
|
class WindowingError(SyntaxOrAccessError):
sqlstate = '42P20'
|
class WindowingError(SyntaxOrAccessError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
148,447 |
MagicStack/asyncpg
|
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
|
asyncpg.exceptions.FDWUnableToCreateReplyError
|
class FDWUnableToCreateReplyError(FDWError):
sqlstate = 'HV00M'
|
class FDWUnableToCreateReplyError(FDWError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 6 | 0 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.