Dataset schema (column name, dtype, and observed min/max; for string columns the range is the value length in characters):

Column                      Type     Min   Max
id                          int64    0     843k
repository_name             string   7     55
file_path                   string   9     332
class_name                  string   3     290
human_written_code          string   12    4.36M
class_skeleton              string   19    2.2M
total_program_units         int64    1     9.57k
total_doc_str               int64    0     4.2k
AvgCountLine                float64  0     7.89k
AvgCountLineBlank           float64  0     300
AvgCountLineCode            float64  0     7.89k
AvgCountLineComment         float64  0     7.89k
AvgCyclomatic               float64  0     130
CommentToCodeRatio          float64  0     176
CountClassBase              float64  0     48
CountClassCoupled           float64  0     589
CountClassCoupledModified   float64  0     581
CountClassDerived           float64  0     5.37k
CountDeclInstanceMethod     float64  0     4.2k
CountDeclInstanceVariable   float64  0     299
CountDeclMethod             float64  0     4.2k
CountDeclMethodAll          float64  0     4.2k
CountLine                   float64  1     115k
CountLineBlank              float64  0     9.01k
CountLineCode               float64  0     94.4k
CountLineCodeDecl           float64  0     46.1k
CountLineCodeExe            float64  0     91.3k
CountLineComment            float64  0     27k
CountStmt                   float64  1     93.2k
CountStmtDecl               float64  0     46.1k
CountStmtExe                float64  0     90.2k
MaxCyclomatic               float64  0     759
MaxInheritanceTree          float64  0     16
MaxNesting                  float64  0     34
SumCyclomatic               float64  0     6k
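Each record below lists its values in this column order: the six identifying and text fields first (id, repository_name, file_path, class_name, human_written_code, class_skeleton), then the 29 numeric fields from total_program_units through SumCyclomatic. Most of the metric names match those reported by the SciTools Understand static-analysis tool, which is presumably how they were computed. As a minimal sketch of how a flat row maps back onto this schema (the file name is hypothetical and assumes a Parquet export of the dataset; only the column order is taken from the table above):

    # Minimal sketch: map one flat row of values back onto the schema above.
    # "code_metrics.parquet" is a hypothetical path, not a name from the source.
    import pandas as pd

    COLUMNS = [
        "id", "repository_name", "file_path", "class_name",
        "human_written_code", "class_skeleton",
        "total_program_units", "total_doc_str",
        "AvgCountLine", "AvgCountLineBlank", "AvgCountLineCode",
        "AvgCountLineComment", "AvgCyclomatic", "CommentToCodeRatio",
        "CountClassBase", "CountClassCoupled", "CountClassCoupledModified",
        "CountClassDerived", "CountDeclInstanceMethod",
        "CountDeclInstanceVariable", "CountDeclMethod", "CountDeclMethodAll",
        "CountLine", "CountLineBlank", "CountLineCode", "CountLineCodeDecl",
        "CountLineCodeExe", "CountLineComment", "CountStmt", "CountStmtDecl",
        "CountStmtExe", "MaxCyclomatic", "MaxInheritanceTree", "MaxNesting",
        "SumCyclomatic",
    ]

    df = pd.read_parquet("code_metrics.parquet")  # hypothetical export
    assert list(df.columns) == COLUMNS
    record = df.iloc[0].to_dict()  # one record per (repository, class)
    print(record["class_name"], record["CountLineCode"], record["SumCyclomatic"])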
148,448
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TransactionTimeoutError
class TransactionTimeoutError(InvalidTransactionStateError): sqlstate = '25P04'
class TransactionTimeoutError(InvalidTransactionStateError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
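Note the relationship visible in this first record and repeated throughout: class_skeleton is human_written_code with each class or method body replaced by pass (here, sqlstate = '25P04' becomes pass), preserving signatures, decorators, and docstrings while dropping implementations.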
148,449
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.InappropriateAccessModeForBranchTransactionError
class InappropriateAccessModeForBranchTransactionError( InvalidTransactionStateError): sqlstate = '25003'
class InappropriateAccessModeForBranchTransactionError( InvalidTransactionStateError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
3
0
3
3
1
0
2
2
1
0
6
0
0
148,450
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.StringDataRightTruncation
class StringDataRightTruncation(PostgresWarning): sqlstate = '01004'
class StringDataRightTruncation(PostgresWarning): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
33
2
0
2
2
1
0
2
2
1
0
6
0
0
148,451
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.InappropriateIsolationLevelForBranchTransactionError
class InappropriateIsolationLevelForBranchTransactionError( InvalidTransactionStateError): sqlstate = '25004'
class InappropriateIsolationLevelForBranchTransactionError( InvalidTransactionStateError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
3
0
3
3
1
0
2
2
1
0
6
0
0
148,452
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.IndeterminateCollationError
class IndeterminateCollationError(SyntaxOrAccessError): sqlstate = '42P22'
class IndeterminateCollationError(SyntaxOrAccessError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,453
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.IndeterminateDatatypeError
class IndeterminateDatatypeError(SyntaxOrAccessError): sqlstate = '42P18'
class IndeterminateDatatypeError(SyntaxOrAccessError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,454
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.IndexCorruptedError
class IndexCorruptedError(InternalServerError): sqlstate = 'XX002'
class IndexCorruptedError(InternalServerError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,455
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.IndicatorOverflowError
class IndicatorOverflowError(DataError): sqlstate = '22022'
class IndicatorOverflowError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,456
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NonUniqueKeysInAJsonObjectError
class NonUniqueKeysInAJsonObjectError(DataError): sqlstate = '22037'
class NonUniqueKeysInAJsonObjectError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,457
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NonstandardUseOfEscapeCharacterError
class NonstandardUseOfEscapeCharacterError(DataError): sqlstate = '22P06'
class NonstandardUseOfEscapeCharacterError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,458
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NotAnXmlDocumentError
class NotAnXmlDocumentError(DataError): sqlstate = '2200L'
class NotAnXmlDocumentError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,459
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NotNullViolationError
class NotNullViolationError(IntegrityConstraintViolationError): sqlstate = '23502'
class NotNullViolationError(IntegrityConstraintViolationError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,460
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NullValueEliminatedInSetFunction
class NullValueEliminatedInSetFunction(PostgresWarning): sqlstate = '01003'
class NullValueEliminatedInSetFunction(PostgresWarning): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
33
2
0
2
2
1
0
2
2
1
0
6
0
0
148,461
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NullValueInExternalRoutineNotAllowedError
class NullValueInExternalRoutineNotAllowedError( ExternalRoutineInvocationError): sqlstate = '39004'
class NullValueInExternalRoutineNotAllowedError( ExternalRoutineInvocationError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
3
0
3
3
1
0
2
2
1
0
6
0
0
148,462
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NullValueNoIndicatorParameterError
class NullValueNoIndicatorParameterError(DataError): sqlstate = '22002'
class NullValueNoIndicatorParameterError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,463
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NullValueNotAllowedError
class NullValueNotAllowedError(DataError): sqlstate = '22004'
class NullValueNotAllowedError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,464
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NumericValueOutOfRangeError
class NumericValueOutOfRangeError(DataError): sqlstate = '22003'
class NumericValueOutOfRangeError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,465
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.ObjectInUseError
class ObjectInUseError(ObjectNotInPrerequisiteStateError): sqlstate = '55006'
class ObjectInUseError(ObjectNotInPrerequisiteStateError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,466
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.ObjectNotInPrerequisiteStateError
class ObjectNotInPrerequisiteStateError(_base.PostgresError): sqlstate = '55000'
class ObjectNotInPrerequisiteStateError(_base.PostgresError): pass
1
0
0
0
0
0
0
0
1
0
0
4
0
0
0
31
2
0
2
2
1
0
2
2
1
0
5
0
0
148,467
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.OperatorInterventionError
class OperatorInterventionError(_base.PostgresError): sqlstate = '57000'
class OperatorInterventionError(_base.PostgresError): pass
1
0
0
0
0
0
0
0
1
0
0
6
0
0
0
31
2
0
2
2
1
0
2
2
1
0
5
0
0
148,468
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.OutOfMemoryError
class OutOfMemoryError(InsufficientResourcesError): sqlstate = '53200'
class OutOfMemoryError(InsufficientResourcesError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,469
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.PLPGSQLError
class PLPGSQLError(_base.PostgresError): sqlstate = 'P0000'
class PLPGSQLError(_base.PostgresError): pass
1
0
0
0
0
0
0
0
1
0
0
4
0
0
0
31
2
0
2
2
1
0
2
2
1
0
5
0
0
148,470
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.InFailedSQLTransactionError
class InFailedSQLTransactionError(InvalidTransactionStateError): sqlstate = '25P02'
class InFailedSQLTransactionError(InvalidTransactionStateError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,471
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.ImplicitZeroBitPadding
class ImplicitZeroBitPadding(PostgresWarning): sqlstate = '01008'
class ImplicitZeroBitPadding(PostgresWarning): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
33
2
0
2
2
1
0
2
2
1
0
6
0
0
148,472
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.StringDataRightTruncationError
class StringDataRightTruncationError(DataError): sqlstate = '22001'
class StringDataRightTruncationError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,473
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.FDWReplyHandleError
class FDWReplyHandleError(FDWError): sqlstate = 'HV00K'
class FDWReplyHandleError(FDWError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,474
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TransactionRollbackError
class TransactionRollbackError(_base.PostgresError): sqlstate = '40000'
class TransactionRollbackError(_base.PostgresError): pass
1
0
0
0
0
0
0
0
1
0
0
4
0
0
0
31
2
0
2
2
1
0
2
2
1
0
5
0
0
148,475
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TransactionResolutionUnknownError
class TransactionResolutionUnknownError(PostgresConnectionError): sqlstate = '08007'
class TransactionResolutionUnknownError(PostgresConnectionError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,476
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.IdleSessionTimeoutError
class IdleSessionTimeoutError(OperatorInterventionError): sqlstate = '57P05'
class IdleSessionTimeoutError(OperatorInterventionError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,477
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TransactionIntegrityConstraintViolationError
class TransactionIntegrityConstraintViolationError(TransactionRollbackError): sqlstate = '40002'
class TransactionIntegrityConstraintViolationError(TransactionRollbackError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,478
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TooManyRowsError
class TooManyRowsError(PLPGSQLError): sqlstate = 'P0003'
class TooManyRowsError(PLPGSQLError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,479
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TooManyJsonObjectMembersError
class TooManyJsonObjectMembersError(DataError): sqlstate = '2203E'
class TooManyJsonObjectMembersError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,480
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/__init__.py
asyncpg._testbase.ClusterTestCase
class ClusterTestCase(TestCase): @classmethod def get_server_settings(cls): settings = { 'log_connections': 'on' } if cls.cluster.get_pg_version() >= (11, 0): # JITting messes up timing tests, and # is not essential for testing. settings['jit'] = 'off' return settings @classmethod def new_cluster(cls, ClusterCls, *, cluster_kwargs={}, initdb_options={}): cluster = _init_cluster(ClusterCls, cluster_kwargs, _get_initdb_options(initdb_options)) cls._clusters.append(cluster) return cluster @classmethod def start_cluster(cls, cluster, *, server_settings={}): cluster.start(port='dynamic', server_settings=server_settings) @classmethod def setup_cluster(cls): cls.cluster = _init_default_cluster() if cls.cluster.get_status() != 'running': cls.cluster.start( port='dynamic', server_settings=cls.get_server_settings()) @classmethod def setUpClass(cls): super().setUpClass() cls._clusters = [] cls.setup_cluster() @classmethod def tearDownClass(cls): super().tearDownClass() for cluster in cls._clusters: if cluster is not _default_cluster: cluster.stop() cluster.destroy() cls._clusters = [] @classmethod def get_connection_spec(cls, kwargs={}): conn_spec = cls.cluster.get_connection_spec() if kwargs.get('dsn'): conn_spec.pop('host') conn_spec.update(kwargs) if not os.environ.get('PGHOST') and not kwargs.get('dsn'): if 'database' not in conn_spec: conn_spec['database'] = 'postgres' if 'user' not in conn_spec: conn_spec['user'] = 'postgres' return conn_spec @classmethod def connect(cls, **kwargs): conn_spec = cls.get_connection_spec(kwargs) return pg_connection.connect(**conn_spec, loop=cls.loop) def setUp(self): super().setUp() self._pools = [] def tearDown(self): super().tearDown() for pool in self._pools: pool.terminate() self._pools = [] def create_pool(self, pool_class=pg_pool.Pool, connection_class=pg_connection.Connection, **kwargs): conn_spec = self.get_connection_spec(kwargs) pool = create_pool(loop=self.loop, pool_class=pool_class, connection_class=connection_class, **conn_spec) self._pools.append(pool) return pool
class ClusterTestCase(TestCase): @classmethod def get_server_settings(cls): pass @classmethod def new_cluster(cls, ClusterCls, *, cluster_kwargs={}, initdb_options={}): pass @classmethod def start_cluster(cls, cluster, *, server_settings={}): pass @classmethod def setup_cluster(cls): pass @classmethod def setUpClass(cls): pass @classmethod def tearDownClass(cls): pass @classmethod def get_connection_spec(cls, kwargs={}): pass @classmethod def connect(cls, **kwargs): pass def setUpClass(cls): pass def tearDownClass(cls): pass def create_pool(self, pool_class=pg_pool.Pool, connection_class=pg_connection.Connection, **kwargs): pass
20
0
6
0
5
0
2
0.03
1
3
2
3
3
2
11
106
83
13
68
30
47
2
54
21
42
5
4
2
20
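For this asyncpg._testbase.ClusterTestCase record, the stored CommentToCodeRatio of 0.03 is consistent with the ratio being derived as CountLineComment over CountLineCode (the same holds for other records, e.g., ConnectedTestCase below: 1 / 13 ≈ 0.08). A quick check under that assumption:

    # Sketch: sanity-check the derived ratio on the ClusterTestCase record,
    # assuming CommentToCodeRatio = CountLineComment / CountLineCode.
    count_line_comment = 2   # CountLineComment for this record
    count_line_code = 68     # CountLineCode for this record
    assert round(count_line_comment / count_line_code, 2) == 0.03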
148,481
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/__init__.py
asyncpg._testbase.ConnectedTestCase
class ConnectedTestCase(ClusterTestCase): def setUp(self): super().setUp() # Extract options set up with `with_connection_options`. test_func = getattr(self, self._testMethodName).__func__ opts = getattr(test_func, '__connect_options__', {}) self.con = self.loop.run_until_complete(self.connect(**opts)) self.server_version = self.con.get_server_version() def tearDown(self): try: self.loop.run_until_complete(self.con.close()) self.con = None finally: super().tearDown()
class ConnectedTestCase(ClusterTestCase): def setUp(self): pass def tearDown(self): pass
3
0
7
1
6
1
1
0.08
1
1
0
0
2
2
2
108
17
3
13
7
10
1
12
7
9
1
5
1
2
148,482
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/__init__.py
asyncpg._testbase.HotStandbyTestCase
class HotStandbyTestCase(ClusterTestCase): @classmethod def setup_cluster(cls): cls.master_cluster = cls.new_cluster(pg_cluster.TempCluster) cls.start_cluster( cls.master_cluster, server_settings={ 'max_wal_senders': 10, 'wal_level': 'hot_standby' } ) con = None try: con = cls.loop.run_until_complete( cls.master_cluster.connect( database='postgres', user='postgres', loop=cls.loop)) cls.loop.run_until_complete( con.execute(''' CREATE ROLE replication WITH LOGIN REPLICATION ''')) cls.master_cluster.trust_local_replication_by('replication') conn_spec = cls.master_cluster.get_connection_spec() cls.standby_cluster = cls.new_cluster( pg_cluster.HotStandbyCluster, cluster_kwargs={ 'master': conn_spec, 'replication_user': 'replication' } ) cls.start_cluster( cls.standby_cluster, server_settings={ 'hot_standby': True } ) finally: if con is not None: cls.loop.run_until_complete(con.close()) @classmethod def get_cluster_connection_spec(cls, cluster, kwargs={}): conn_spec = cluster.get_connection_spec() if kwargs.get('dsn'): conn_spec.pop('host') conn_spec.update(kwargs) if not os.environ.get('PGHOST') and not kwargs.get('dsn'): if 'database' not in conn_spec: conn_spec['database'] = 'postgres' if 'user' not in conn_spec: conn_spec['user'] = 'postgres' return conn_spec @classmethod def get_connection_spec(cls, kwargs={}): primary_spec = cls.get_cluster_connection_spec( cls.master_cluster, kwargs ) standby_spec = cls.get_cluster_connection_spec( cls.standby_cluster, kwargs ) return { 'host': [primary_spec['host'], standby_spec['host']], 'port': [primary_spec['port'], standby_spec['port']], 'database': primary_spec['database'], 'user': primary_spec['user'], **kwargs } @classmethod def connect_primary(cls, **kwargs): conn_spec = cls.get_cluster_connection_spec(cls.master_cluster, kwargs) return pg_connection.connect(**conn_spec, loop=cls.loop) @classmethod def connect_standby(cls, **kwargs): conn_spec = cls.get_cluster_connection_spec( cls.standby_cluster, kwargs ) return pg_connection.connect(**conn_spec, loop=cls.loop)
class HotStandbyTestCase(ClusterTestCase): @classmethod def setup_cluster(cls): pass @classmethod def get_cluster_connection_spec(cls, cluster, kwargs={}): pass @classmethod def get_connection_spec(cls, kwargs={}): pass @classmethod def connect_primary(cls, **kwargs): pass @classmethod def connect_standby(cls, **kwargs): pass
11
0
15
1
14
0
2
0
1
2
2
0
0
0
5
111
88
12
76
18
65
0
35
13
29
5
5
2
10
148,483
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/__init__.py
asyncpg._testbase.ProxiedClusterTestCase
class ProxiedClusterTestCase(ClusterTestCase): @classmethod def get_server_settings(cls): settings = dict(super().get_server_settings()) settings['listen_addresses'] = '127.0.0.1' return settings @classmethod def get_proxy_settings(cls): return {'fuzzing-mode': None} @classmethod def setUpClass(cls): super().setUpClass() conn_spec = cls.cluster.get_connection_spec() host = conn_spec.get('host') if not host: host = '127.0.0.1' elif host.startswith('/'): host = '127.0.0.1' cls.proxy = fuzzer.TCPFuzzingProxy( backend_host=host, backend_port=conn_spec['port'], ) cls.proxy.start() @classmethod def tearDownClass(cls): cls.proxy.stop() super().tearDownClass() @classmethod def get_connection_spec(cls, kwargs): conn_spec = super().get_connection_spec(kwargs) conn_spec['host'] = cls.proxy.listening_addr conn_spec['port'] = cls.proxy.listening_port return conn_spec def tearDown(self): self.proxy.reset() super().tearDown()
class ProxiedClusterTestCase(ClusterTestCase): @classmethod def get_server_settings(cls): pass @classmethod def get_proxy_settings(cls): pass @classmethod def setUpClass(cls): pass @classmethod def tearDownClass(cls): pass @classmethod def get_connection_spec(cls, kwargs): pass def tearDownClass(cls): pass
12
0
5
0
5
0
1
0
1
3
1
0
1
0
6
112
41
5
36
16
24
0
27
11
20
3
5
1
8
148,484
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TriggerProtocolViolatedError
class TriggerProtocolViolatedError(ExternalRoutineInvocationError): sqlstate = '39P01'
class TriggerProtocolViolatedError(ExternalRoutineInvocationError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,485
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/__init__.py
asyncpg._testbase.TestCase
class TestCase(unittest.TestCase, metaclass=TestCaseMeta): @classmethod def setUpClass(cls): if os.environ.get('USE_UVLOOP'): import uvloop asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) loop = asyncio.new_event_loop() asyncio.set_event_loop(None) cls.loop = loop @classmethod def tearDownClass(cls): cls.loop.close() asyncio.set_event_loop(None) def setUp(self): self.loop.set_exception_handler(self.loop_exception_handler) self.__unhandled_exceptions = [] def tearDown(self): excs = [] for exc in self.__unhandled_exceptions: if isinstance(exc, ConnectionResetError): texc = traceback.TracebackException.from_exception( exc, lookup_lines=False) if texc.stack[-1].name == "_call_connection_lost": # On Windows calling socket.shutdown may raise # ConnectionResetError, which happens in the # finally block of _call_connection_lost. continue excs.append(exc) if excs: formatted = [] for i, context in enumerate(excs): formatted.append(self._format_loop_exception(context, i + 1)) self.fail( 'unexpected exceptions in asynchronous code:\n' + '\n'.join(formatted)) @contextlib.contextmanager def assertRunUnder(self, delta): st = time.monotonic() try: yield finally: elapsed = time.monotonic() - st if elapsed > delta: raise AssertionError( 'running block took {:0.3f}s which is longer ' 'than the expected maximum of {:0.3f}s'.format( elapsed, delta)) @contextlib.contextmanager def assertLoopErrorHandlerCalled(self, msg_re: str): contexts = [] def handler(loop, ctx): contexts.append(ctx) old_handler = self.loop.get_exception_handler() self.loop.set_exception_handler(handler) try: yield for ctx in contexts: msg = ctx.get('message') if msg and re.search(msg_re, msg): return raise AssertionError( 'no message matching {!r} was logged with ' 'loop.call_exception_handler()'.format(msg_re)) finally: self.loop.set_exception_handler(old_handler) def loop_exception_handler(self, loop, context): self.__unhandled_exceptions.append(context) loop.default_exception_handler(context) def _format_loop_exception(self, context, n): message = context.get('message', 'Unhandled exception in event loop') exception = context.get('exception') if exception is not None: exc_info = (type(exception), exception, exception.__traceback__) else: exc_info = None lines = [] for key in sorted(context): if key in {'message', 'exception'}: continue value = context[key] if key == 'source_traceback': tb = ''.join(traceback.format_list(value)) value = 'Object created at (most recent call last):\n' value += tb.rstrip() else: try: value = repr(value) except Exception as ex: value = ('Exception in __repr__ {!r}; ' 'value type: {!r}'.format(ex, type(value))) lines.append('[{}]: {}\n\n'.format(key, value)) if exc_info is not None: lines.append('[exception]:\n') formatted_exc = textwrap.indent( ''.join(traceback.format_exception(*exc_info)), ' ') lines.append(formatted_exc) details = textwrap.indent(''.join(lines), ' ') return '{:02d}. {}:\n{}\n'.format(n, message, details)
class TestCase(unittest.TestCase, metaclass=TestCaseMeta): @classmethod def setUpClass(cls): pass @classmethod def tearDownClass(cls): pass def setUpClass(cls): pass def tearDownClass(cls): pass @contextlib.contextmanager def assertRunUnder(self, delta): pass @contextlib.contextmanager def assertLoopErrorHandlerCalled(self, msg_re: str): pass def handler(loop, ctx): pass def loop_exception_handler(self, loop, context): pass def _format_loop_exception(self, context, n): pass
14
0
12
1
10
0
3
0.03
2
6
0
1
6
1
8
95
118
20
95
38
80
3
77
33
66
7
3
3
24
148,486
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/fuzzer.py
asyncpg._testbase.fuzzer.StopServer
class StopServer(Exception): pass
class StopServer(Exception): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
10
2
0
2
1
1
0
2
1
1
0
3
0
0
148,487
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/fuzzer.py
asyncpg._testbase.fuzzer.TCPFuzzingProxy
class TCPFuzzingProxy: def __init__(self, *, listening_addr: str='127.0.0.1', listening_port: typing.Optional[int]=None, backend_host: str, backend_port: int, settings: typing.Optional[dict]=None) -> None: self.listening_addr = listening_addr self.listening_port = listening_port self.backend_host = backend_host self.backend_port = backend_port self.settings = settings or {} self.loop = None self.connectivity = None self.connectivity_loss = None self.stop_event = None self.connections = {} self.sock = None self.listen_task = None async def _wait(self, work): work_task = asyncio.ensure_future(work) stop_event_task = asyncio.ensure_future(self.stop_event.wait()) try: await asyncio.wait( [work_task, stop_event_task], return_when=asyncio.FIRST_COMPLETED) if self.stop_event.is_set(): raise StopServer() else: return work_task.result() finally: if not work_task.done(): work_task.cancel() if not stop_event_task.done(): stop_event_task.cancel() def start(self): started = threading.Event() self.thread = threading.Thread( target=self._start_thread, args=(started,)) self.thread.start() if not started.wait(timeout=2): raise RuntimeError('fuzzer proxy failed to start') def stop(self): self.loop.call_soon_threadsafe(self._stop) self.thread.join() def _stop(self): self.stop_event.set() def _start_thread(self, started_event): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) self.connectivity = asyncio.Event() self.connectivity.set() self.connectivity_loss = asyncio.Event() self.stop_event = asyncio.Event() if self.listening_port is None: self.listening_port = cluster.find_available_port() self.sock = socket.socket() self.sock.bind((self.listening_addr, self.listening_port)) self.sock.listen(50) self.sock.setblocking(False) try: self.loop.run_until_complete(self._main(started_event)) finally: self.loop.close() async def _main(self, started_event): self.listen_task = asyncio.ensure_future(self.listen()) # Notify the main thread that we are ready to go. started_event.set() try: await self.listen_task finally: for c in list(self.connections): c.close() await asyncio.sleep(0.01) if hasattr(self.loop, 'remove_reader'): self.loop.remove_reader(self.sock.fileno()) self.sock.close() async def listen(self): while True: try: client_sock, _ = await self._wait( self.loop.sock_accept(self.sock)) backend_sock = socket.socket() backend_sock.setblocking(False) await self._wait(self.loop.sock_connect( backend_sock, (self.backend_host, self.backend_port))) except StopServer: break conn = Connection(client_sock, backend_sock, self) conn_task = self.loop.create_task(conn.handle()) self.connections[conn] = conn_task def trigger_connectivity_loss(self): self.loop.call_soon_threadsafe(self._trigger_connectivity_loss) def _trigger_connectivity_loss(self): self.connectivity.clear() self.connectivity_loss.set() def restore_connectivity(self): self.loop.call_soon_threadsafe(self._restore_connectivity) def _restore_connectivity(self): self.connectivity.set() self.connectivity_loss.clear() def reset(self): self.restore_connectivity() def _close_connection(self, connection): conn_task = self.connections.pop(connection, None) if conn_task is not None: conn_task.cancel() def close_all_connections(self): for conn in list(self.connections): self.loop.call_soon_threadsafe(self._close_connection, conn)
class TCPFuzzingProxy: def __init__(self, *, listening_addr: str='127.0.0.1', listening_port: typing.Optional[int]=None, backend_host: str, backend_port: int, settings: typing.Optional[dict]=None) -> None: pass async def _wait(self, work): pass def start(self): pass def stop(self): pass def _stop(self): pass def _start_thread(self, started_event): pass async def _main(self, started_event): pass async def listen(self): pass def trigger_connectivity_loss(self): pass def _trigger_connectivity_loss(self): pass def restore_connectivity(self): pass def _restore_connectivity(self): pass def reset(self): pass def _close_connection(self, connection): pass def close_all_connections(self): pass
16
0
8
1
7
0
2
0.01
0
10
2
0
15
13
15
15
131
23
107
42
88
1
95
39
79
4
0
2
26
148,488
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TooManyJsonArrayElementsError
class TooManyJsonArrayElementsError(DataError): sqlstate = '2203D'
class TooManyJsonArrayElementsError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,489
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TooManyConnectionsError
class TooManyConnectionsError(InsufficientResourcesError): sqlstate = '53300'
class TooManyConnectionsError(InsufficientResourcesError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,490
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TooManyColumnsError
class TooManyColumnsError(ProgramLimitExceededError): sqlstate = '54011'
class TooManyColumnsError(ProgramLimitExceededError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,491
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.TooManyArgumentsError
class TooManyArgumentsError(ProgramLimitExceededError): sqlstate = '54023'
class TooManyArgumentsError(ProgramLimitExceededError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,492
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.SyntaxOrAccessError
class SyntaxOrAccessError(_base.PostgresError): sqlstate = '42000'
class SyntaxOrAccessError(_base.PostgresError): pass
1
0
0
0
0
0
0
0
1
0
0
43
0
0
0
31
2
0
2
2
1
0
2
2
1
0
5
0
0
148,493
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.SubstringError
class SubstringError(DataError): sqlstate = '22011'
class SubstringError(DataError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,494
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.FDWOptionNameNotFoundError
class FDWOptionNameNotFoundError(FDWError): sqlstate = 'HV00J'
class FDWOptionNameNotFoundError(FDWError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,495
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.FDWOutOfMemoryError
class FDWOutOfMemoryError(FDWError): sqlstate = 'HV001'
class FDWOutOfMemoryError(FDWError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,496
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/_testbase/fuzzer.py
asyncpg._testbase.fuzzer.Connection
class Connection: def __init__(self, client_sock, backend_sock, proxy): self.client_sock = client_sock self.backend_sock = backend_sock self.proxy = proxy self.loop = proxy.loop self.connectivity = proxy.connectivity self.connectivity_loss = proxy.connectivity_loss self.proxy_to_backend_task = None self.proxy_from_backend_task = None self.is_closed = False def close(self): if self.is_closed: return self.is_closed = True if self.proxy_to_backend_task is not None: self.proxy_to_backend_task.cancel() self.proxy_to_backend_task = None if self.proxy_from_backend_task is not None: self.proxy_from_backend_task.cancel() self.proxy_from_backend_task = None self.proxy._close_connection(self) async def handle(self): self.proxy_to_backend_task = asyncio.ensure_future( self.proxy_to_backend()) self.proxy_from_backend_task = asyncio.ensure_future( self.proxy_from_backend()) try: await asyncio.wait( [self.proxy_to_backend_task, self.proxy_from_backend_task], return_when=asyncio.FIRST_COMPLETED) finally: if self.proxy_to_backend_task is not None: self.proxy_to_backend_task.cancel() if self.proxy_from_backend_task is not None: self.proxy_from_backend_task.cancel() # Asyncio fails to properly remove the readers and writers # when the task doing recv() or send() is cancelled, so # we must remove the readers and writers manually before # closing the sockets. self.loop.remove_reader(self.client_sock.fileno()) self.loop.remove_writer(self.client_sock.fileno()) self.loop.remove_reader(self.backend_sock.fileno()) self.loop.remove_writer(self.backend_sock.fileno()) self.client_sock.close() self.backend_sock.close() async def _read(self, sock, n): read_task = asyncio.ensure_future( self.loop.sock_recv(sock, n)) conn_event_task = asyncio.ensure_future( self.connectivity_loss.wait()) try: await asyncio.wait( [read_task, conn_event_task], return_when=asyncio.FIRST_COMPLETED) if self.connectivity_loss.is_set(): return None else: return read_task.result() finally: if not self.loop.is_closed(): if not read_task.done(): read_task.cancel() if not conn_event_task.done(): conn_event_task.cancel() async def _write(self, sock, data): write_task = asyncio.ensure_future( self.loop.sock_sendall(sock, data)) conn_event_task = asyncio.ensure_future( self.connectivity_loss.wait()) try: await asyncio.wait( [write_task, conn_event_task], return_when=asyncio.FIRST_COMPLETED) if self.connectivity_loss.is_set(): return None else: return write_task.result() finally: if not self.loop.is_closed(): if not write_task.done(): write_task.cancel() if not conn_event_task.done(): conn_event_task.cancel() async def proxy_to_backend(self): buf = None try: while True: await self.connectivity.wait() if buf is not None: data = buf buf = None else: data = await self._read(self.client_sock, 4096) if data == b'': break if self.connectivity_loss.is_set(): if data: buf = data continue await self._write(self.backend_sock, data) except ConnectionError: pass finally: if not self.loop.is_closed(): self.loop.call_soon(self.close) async def proxy_from_backend(self): buf = None try: while True: await self.connectivity.wait() if buf is not None: data = buf buf = None else: data = await self._read(self.backend_sock, 4096) if data == b'': break if self.connectivity_loss.is_set(): if data: buf = data continue await self._write(self.client_sock, data) except ConnectionError: pass finally: if not self.loop.is_closed(): self.loop.call_soon(self.close)
class Connection: def __init__(self, client_sock, backend_sock, proxy): pass def close(self): pass async def handle(self): pass async def _read(self, sock, n): pass async def _write(self, sock, data): pass async def proxy_to_backend(self): pass async def proxy_from_backend(self): pass
8
0
21
3
18
1
5
0.03
0
1
0
0
7
9
7
7
154
26
124
25
116
4
103
25
95
8
0
4
34
148,497
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.ForeignKeyViolationError
class ForeignKeyViolationError(IntegrityConstraintViolationError): sqlstate = '23503'
class ForeignKeyViolationError(IntegrityConstraintViolationError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,498
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.NoDataFoundError
class NoDataFoundError(PLPGSQLError): sqlstate = 'P0002'
class NoDataFoundError(PLPGSQLError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
31
2
0
2
2
1
0
2
2
1
0
6
0
0
148,499
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/exceptions/__init__.py
asyncpg.exceptions.FeatureNotSupportedError
class FeatureNotSupportedError(_base.PostgresError): sqlstate = '0A000'
class FeatureNotSupportedError(_base.PostgresError): pass
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
31
2
0
2
2
1
0
2
2
1
0
5
0
0
148,500
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/types.py
asyncpg.types._RangeValue
class _RangeValue(typing.Protocol): def __eq__(self, __value: object) -> bool: ... def __lt__(self, __other: Self, /) -> bool: ... def __gt__(self, __other: Self, /) -> bool: ...
class _RangeValue(typing.Protocol): def __eq__(self, __value: object) -> bool: pass def __lt__(self, __other: Self, /) -> bool: pass def __gt__(self, __other: Self, /) -> bool: pass
4
0
2
0
2
0
1
0
1
1
0
0
3
0
3
27
9
2
7
4
3
0
7
4
3
1
5
0
3
148,501
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/types.py
asyncpg.types.Type
class Type(typing.NamedTuple): oid: int name: str kind: str schema: str
class Type(typing.NamedTuple): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
5
0
5
1
4
0
5
1
4
0
1
0
0
148,502
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/types.py
asyncpg.types.ServerVersion
class ServerVersion(typing.NamedTuple): major: int minor: int micro: int releaselevel: str serial: int
class ServerVersion(typing.NamedTuple): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
6
0
6
1
5
0
6
1
5
0
1
0
0
148,503
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/types.py
asyncpg.types.Range
class Range(typing.Generic[_RV]): """Immutable representation of PostgreSQL `range` type.""" __slots__ = ('_lower', '_upper', '_lower_inc', '_upper_inc', '_empty') _lower: _RV | None _upper: _RV | None _lower_inc: bool _upper_inc: bool _empty: bool def __init__( self, lower: _RV | None = None, upper: _RV | None = None, *, lower_inc: bool = True, upper_inc: bool = False, empty: bool = False ) -> None: self._empty = empty if empty: self._lower = self._upper = None self._lower_inc = self._upper_inc = False else: self._lower = lower self._upper = upper self._lower_inc = lower is not None and lower_inc self._upper_inc = upper is not None and upper_inc @property def lower(self) -> _RV | None: return self._lower @property def lower_inc(self) -> bool: return self._lower_inc @property def lower_inf(self) -> bool: return self._lower is None and not self._empty @property def upper(self) -> _RV | None: return self._upper @property def upper_inc(self) -> bool: return self._upper_inc @property def upper_inf(self) -> bool: return self._upper is None and not self._empty @property def isempty(self) -> bool: return self._empty def _issubset_lower(self, other: Self) -> bool: if other._lower is None: return True if self._lower is None: return False return self._lower > other._lower or ( self._lower == other._lower and (other._lower_inc or not self._lower_inc) ) def _issubset_upper(self, other: Self) -> bool: if other._upper is None: return True if self._upper is None: return False return self._upper < other._upper or ( self._upper == other._upper and (other._upper_inc or not self._upper_inc) ) def issubset(self, other: Self) -> bool: if self._empty: return True if other._empty: return False return self._issubset_lower(other) and self._issubset_upper(other) def issuperset(self, other: Self) -> bool: return other.issubset(self) def __bool__(self) -> bool: return not self._empty def __eq__(self, other: object) -> bool: if not isinstance(other, Range): return NotImplemented return ( self._lower, self._upper, self._lower_inc, self._upper_inc, self._empty ) == ( other._lower, # pyright: ignore [reportUnknownMemberType] other._upper, # pyright: ignore [reportUnknownMemberType] other._lower_inc, other._upper_inc, other._empty ) def __hash__(self) -> int: return hash(( self._lower, self._upper, self._lower_inc, self._upper_inc, self._empty )) def __repr__(self) -> str: if self._empty: desc = 'empty' else: if self._lower is None or not self._lower_inc: lb = '(' else: lb = '[' if self._lower is not None: lb += repr(self._lower) if self._upper is not None: ub = repr(self._upper) else: ub = '' if self._upper is None or not self._upper_inc: ub += ')' else: ub += ']' desc = '{}, {}'.format(lb, ub) return '<Range {}>'.format(desc) __str__ = __repr__
class Range(typing.Generic[_RV]): '''Immutable representation of PostgreSQL `range` type.''' def __init__( self, lower: _RV | None = None, upper: _RV | None = None, *, lower_inc: bool = True, upper_inc: bool = False, empty: bool = False ) -> None: pass @property def lower(self) -> _RV | None: pass @property def lower_inc(self) -> bool: pass @property def lower_inf(self) -> bool: pass @property def upper(self) -> _RV | None: pass @property def upper_inc(self) -> bool: pass @property def upper_inf(self) -> bool: pass @property def isempty(self) -> bool: pass def _issubset_lower(self, other: Self) -> bool: pass def _issubset_upper(self, other: Self) -> bool: pass def issubset(self, other: Self) -> bool: pass def issuperset(self, other: Self) -> bool: pass def __bool__(self) -> bool: pass def __eq__(self, other: object) -> bool: pass def __hash__(self) -> int: pass def __repr__(self) -> str: pass
24
1
7
1
7
0
2
0.03
1
4
0
0
16
0
16
18
148
28
119
37
87
3
75
22
58
6
1
2
29
148,504
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/types.py
asyncpg.types.Attribute
class Attribute(typing.NamedTuple): name: str type: Type
class Attribute(typing.NamedTuple): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
3
0
3
1
2
0
3
1
2
0
1
0
0
148,505
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_listeners.py
tests.test_listeners.TestConnectionTerminationListener
class TestConnectionTerminationListener(tb.ProxiedClusterTestCase): async def test_connection_termination_callback_called_on_remote(self): called = False async_called = False def close_cb(con): nonlocal called called = True async def async_close_cb(con): nonlocal async_called async_called = True con = await self.connect() con.add_termination_listener(close_cb) con.add_termination_listener(async_close_cb) self.proxy.close_all_connections() try: await con.fetchval('SELECT 1') except Exception: pass self.assertTrue(called) self.assertTrue(async_called) async def test_connection_termination_callback_called_on_local(self): called = False def close_cb(con): nonlocal called called = True con = await self.connect() con.add_termination_listener(close_cb) await con.close() await asyncio.sleep(0) self.assertTrue(called)
class TestConnectionTerminationListener(tb.ProxiedClusterTestCase): async def test_connection_termination_callback_called_on_remote(self): pass def close_cb(con): pass async def async_close_cb(con): pass async def test_connection_termination_callback_called_on_local(self): pass def close_cb(con): pass
6
0
9
1
8
0
1
0
1
1
0
0
2
0
2
2
39
9
30
14
21
0
30
14
21
2
1
1
6
148,506
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/transaction.py
asyncpg.transaction.TransactionState
class TransactionState(enum.Enum): NEW = 0 STARTED = 1 COMMITTED = 2 ROLLEDBACK = 3 FAILED = 4
class TransactionState(enum.Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
6
0
6
6
5
0
6
6
5
0
4
0
0
148,507
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/transaction.py
asyncpg.transaction.Transaction
class Transaction(connresource.ConnectionResource): """Represents a transaction or savepoint block. Transactions are created by calling the :meth:`Connection.transaction() <connection.Connection.transaction>` function. """ __slots__ = ('_connection', '_isolation', '_readonly', '_deferrable', '_state', '_nested', '_id', '_managed') def __init__(self, connection, isolation, readonly, deferrable): super().__init__(connection) if isolation and isolation not in ISOLATION_LEVELS: raise ValueError( 'isolation is expected to be either of {}, ' 'got {!r}'.format(ISOLATION_LEVELS, isolation)) self._isolation = isolation self._readonly = readonly self._deferrable = deferrable self._state = TransactionState.NEW self._nested = False self._id = None self._managed = False async def __aenter__(self): if self._managed: raise apg_errors.InterfaceError( 'cannot enter context: already in an `async with` block') self._managed = True await self.start() async def __aexit__(self, extype, ex, tb): try: self._check_conn_validity('__aexit__') except apg_errors.InterfaceError: if extype is GeneratorExit: # When a PoolAcquireContext is being exited, and there # is an open transaction in an async generator that has # not been iterated fully, there is a possibility that # Pool.release() would race with this __aexit__(), since # both would be in concurrent tasks. In such case we # yield to Pool.release() to do the ROLLBACK for us. # See https://github.com/MagicStack/asyncpg/issues/232 # for an example. return else: raise try: if extype is not None: await self.__rollback() else: await self.__commit() finally: self._managed = False @connresource.guarded async def start(self): """Enter the transaction or savepoint block.""" self.__check_state_base('start') if self._state is TransactionState.STARTED: raise apg_errors.InterfaceError( 'cannot start; the transaction is already started') con = self._connection if con._top_xact is None: if con._protocol.is_in_transaction(): raise apg_errors.InterfaceError( 'cannot use Connection.transaction() in ' 'a manually started transaction') con._top_xact = self else: # Nested transaction block if self._isolation: top_xact_isolation = con._top_xact._isolation if top_xact_isolation is None: top_xact_isolation = ISOLATION_LEVELS_BY_VALUE[ await self._connection.fetchval( 'SHOW transaction_isolation;')] if self._isolation != top_xact_isolation: raise apg_errors.InterfaceError( 'nested transaction has a different isolation level: ' 'current {!r} != outer {!r}'.format( self._isolation, top_xact_isolation)) self._nested = True if self._nested: self._id = con._get_unique_id('savepoint') query = 'SAVEPOINT {};'.format(self._id) else: query = 'BEGIN' if self._isolation == 'read_committed': query += ' ISOLATION LEVEL READ COMMITTED' elif self._isolation == 'read_uncommitted': query += ' ISOLATION LEVEL READ UNCOMMITTED' elif self._isolation == 'repeatable_read': query += ' ISOLATION LEVEL REPEATABLE READ' elif self._isolation == 'serializable': query += ' ISOLATION LEVEL SERIALIZABLE' if self._readonly: query += ' READ ONLY' if self._deferrable: query += ' DEFERRABLE' query += ';' try: await self._connection.execute(query) except BaseException: self._state = TransactionState.FAILED raise else: self._state = TransactionState.STARTED def __check_state_base(self, opname): if self._state is TransactionState.COMMITTED: raise apg_errors.InterfaceError( 'cannot {}; the transaction is already committed'.format( opname)) if self._state is TransactionState.ROLLEDBACK: raise apg_errors.InterfaceError( 'cannot {}; the transaction is already rolled back'.format( opname)) if self._state is TransactionState.FAILED: raise apg_errors.InterfaceError( 'cannot {}; the transaction is in error state'.format( opname)) def __check_state(self, opname): if self._state is not TransactionState.STARTED: if self._state is TransactionState.NEW: raise apg_errors.InterfaceError( 'cannot {}; the transaction is not yet started'.format( opname)) self.__check_state_base(opname) async def __commit(self): self.__check_state('commit') if self._connection._top_xact is self: self._connection._top_xact = None if self._nested: query = 'RELEASE SAVEPOINT {};'.format(self._id) else: query = 'COMMIT;' try: await self._connection.execute(query) except BaseException: self._state = TransactionState.FAILED raise else: self._state = TransactionState.COMMITTED async def __rollback(self): self.__check_state('rollback') if self._connection._top_xact is self: self._connection._top_xact = None if self._nested: query = 'ROLLBACK TO {};'.format(self._id) else: query = 'ROLLBACK;' try: await self._connection.execute(query) except BaseException: self._state = TransactionState.FAILED raise else: self._state = TransactionState.ROLLEDBACK @connresource.guarded async def commit(self): """Exit the transaction or savepoint block and commit changes.""" if self._managed: raise apg_errors.InterfaceError( 'cannot manually commit from within an `async with` block') await self.__commit() @connresource.guarded async def rollback(self): """Exit the transaction or savepoint block and rollback changes.""" if self._managed: raise apg_errors.InterfaceError( 'cannot manually rollback from within an `async with` block') await self.__rollback() def __repr__(self): attrs = [] attrs.append('state:{}'.format(self._state.name.lower())) if self._isolation is not None: attrs.append(self._isolation) if self._readonly: attrs.append('readonly') if self._deferrable: attrs.append('deferrable') if self.__class__.__module__.startswith('asyncpg.'): mod = 'asyncpg' else: mod = self.__class__.__module__ return '<{}.{} {} {:#x}>'.format( mod, self.__class__.__name__, ' '.join(attrs), id(self))
class Transaction(connresource.ConnectionResource): '''Represents a transaction or savepoint block. Transactions are created by calling the :meth:`Connection.transaction() <connection.Connection.transaction>` function. ''' def __init__(self, connection, isolation, readonly, deferrable): pass async def __aenter__(self): pass async def __aexit__(self, extype, ex, tb): pass @connresource.guarded async def start(self): '''Enter the transaction or savepoint block.''' pass def __check_state_base(self, opname): pass def __check_state_base(self, opname): pass async def __commit(self): pass async def __rollback(self): pass @connresource.guarded async def commit(self): '''Exit the transaction or savepoint block and commit changes.''' pass @connresource.guarded async def rollback(self): '''Exit the transaction or savepoint block and rollback changes.''' pass def __repr__(self): pass
15
4
17
1
14
1
4
0.11
1
5
1
0
11
7
11
13
211
29
165
30
150
18
128
27
116
15
1
3
47
148,508
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/serverversion.py
asyncpg.serverversion._VersionDict
class _VersionDict(typing.TypedDict): major: int minor: int | None micro: int | None releaselevel: str | None serial: int | None
class _VersionDict(typing.TypedDict): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
6
0
6
1
5
0
6
1
5
0
1
0
0
148,509
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/prepared_stmt.py
asyncpg.prepared_stmt.PreparedStatement
class PreparedStatement(connresource.ConnectionResource): """A representation of a prepared statement.""" __slots__ = ('_state', '_query', '_last_status') def __init__(self, connection, query, state): super().__init__(connection) self._state = state self._query = query state.attach() self._last_status = None @connresource.guarded def get_name(self) -> str: """Return the name of this prepared statement. .. versionadded:: 0.25.0 """ return self._state.name @connresource.guarded def get_query(self) -> str: """Return the text of the query for this prepared statement. Example:: stmt = await connection.prepare('SELECT $1::int') assert stmt.get_query() == "SELECT $1::int" """ return self._query @connresource.guarded def get_statusmsg(self) -> str: """Return the status of the executed command. Example:: stmt = await connection.prepare('CREATE TABLE mytab (a int)') await stmt.fetch() assert stmt.get_statusmsg() == "CREATE TABLE" """ if self._last_status is None: return self._last_status return self._last_status.decode() @connresource.guarded def get_parameters(self): """Return a description of statement parameters types. :return: A tuple of :class:`asyncpg.types.Type`. Example:: stmt = await connection.prepare('SELECT ($1::int, $2::text)') print(stmt.get_parameters()) # Will print: # (Type(oid=23, name='int4', kind='scalar', schema='pg_catalog'), # Type(oid=25, name='text', kind='scalar', schema='pg_catalog')) """ return self._state._get_parameters() @connresource.guarded def get_attributes(self): """Return a description of relation attributes (columns). :return: A tuple of :class:`asyncpg.types.Attribute`. Example:: st = await self.con.prepare(''' SELECT typname, typnamespace FROM pg_type ''') print(st.get_attributes()) # Will print: # (Attribute( # name='typname', # type=Type(oid=19, name='name', kind='scalar', # schema='pg_catalog')), # Attribute( # name='typnamespace', # type=Type(oid=26, name='oid', kind='scalar', # schema='pg_catalog'))) """ return self._state._get_attributes() @connresource.guarded def cursor(self, *args, prefetch=None, timeout=None) -> cursor.CursorFactory: """Return a *cursor factory* for the prepared statement. :param args: Query arguments. :param int prefetch: The number of rows the *cursor iterator* will prefetch (defaults to ``50``.) :param float timeout: Optional timeout in seconds. :return: A :class:`~cursor.CursorFactory` object. """ return cursor.CursorFactory( self._connection, self._query, self._state, args, prefetch, timeout, self._state.record_class, ) @connresource.guarded async def explain(self, *args, analyze=False): """Return the execution plan of the statement. :param args: Query arguments. :param analyze: If ``True``, the statement will be executed and the run time statitics added to the return value. :return: An object representing the execution plan. This value is actually a deserialized JSON output of the SQL ``EXPLAIN`` command. """ query = 'EXPLAIN (FORMAT JSON, VERBOSE' if analyze: query += ', ANALYZE) ' else: query += ') ' query += self._state.query if analyze: # From PostgreSQL docs: # Important: Keep in mind that the statement is actually # executed when the ANALYZE option is used. Although EXPLAIN # will discard any output that a SELECT would return, other # side effects of the statement will happen as usual. If you # wish to use EXPLAIN ANALYZE on an INSERT, UPDATE, DELETE, # MERGE, CREATE TABLE AS, or EXECUTE statement without letting # the command affect your data, use this approach: # BEGIN; # EXPLAIN ANALYZE ...; # ROLLBACK; tr = self._connection.transaction() await tr.start() try: data = await self._connection.fetchval(query, *args) finally: await tr.rollback() else: data = await self._connection.fetchval(query, *args) return json.loads(data) @connresource.guarded async def fetch(self, *args, timeout=None): r"""Execute the statement and return a list of :class:`Record` objects. :param str query: Query text :param args: Query arguments :param float timeout: Optional timeout value in seconds. :return: A list of :class:`Record` instances. """ data = await self.__bind_execute(args, 0, timeout) return data @connresource.guarded async def fetchval(self, *args, column=0, timeout=None): """Execute the statement and return a value in the first row. :param args: Query arguments. :param int column: Numeric index within the record of the value to return (defaults to 0). :param float timeout: Optional timeout value in seconds. If not specified, defaults to the value of ``command_timeout`` argument to the ``Connection`` instance constructor. :return: The value of the specified column of the first record. """ data = await self.__bind_execute(args, 1, timeout) if not data: return None return data[0][column] @connresource.guarded async def fetchrow(self, *args, timeout=None): """Execute the statement and return the first row. :param str query: Query text :param args: Query arguments :param float timeout: Optional timeout value in seconds. :return: The first row as a :class:`Record` instance. """ data = await self.__bind_execute(args, 1, timeout) if not data: return None return data[0] @connresource.guarded async def fetchmany(self, args, *, timeout=None): """Execute the statement and return a list of :class:`Record` objects. :param args: Query arguments. :param float timeout: Optional timeout value in seconds. :return: A list of :class:`Record` instances. .. versionadded:: 0.30.0 """ return await self.__do_execute( lambda protocol: protocol.bind_execute_many( self._state, args, portal_name='', timeout=timeout, return_rows=True, ) ) @connresource.guarded async def executemany(self, args, *, timeout: float=None): """Execute the statement for each sequence of arguments in *args*. :param args: An iterable containing sequences of arguments. :param float timeout: Optional timeout value in seconds. :return None: This method discards the results of the operations. .. versionadded:: 0.22.0 """ return await self.__do_execute( lambda protocol: protocol.bind_execute_many( self._state, args, portal_name='', timeout=timeout, return_rows=False, )) async def __do_execute(self, executor): protocol = self._connection._protocol try: return await executor(protocol) except exceptions.OutdatedSchemaCacheError: await self._connection.reload_schema_state() # We can not find all manually created prepared statements, so just # drop known cached ones in the `self._connection`. # Other manually created prepared statements will fail and # invalidate themselves (unfortunately, clearing caches again). self._state.mark_closed() raise async def __bind_execute(self, args, limit, timeout): data, status, _ = await self.__do_execute( lambda protocol: protocol.bind_execute( self._state, args, '', limit, True, timeout)) self._last_status = status return data def _check_open(self, meth_name): if self._state.closed: raise exceptions.InterfaceError( 'cannot call PreparedStmt.{}(): ' 'the prepared statement is closed'.format(meth_name)) def _check_conn_validity(self, meth_name): self._check_open(meth_name) super()._check_conn_validity(meth_name) def __del__(self): self._state.detach() self._connection._maybe_gc_stmt(self._state)
class PreparedStatement(connresource.ConnectionResource): '''A representation of a prepared statement.''' def __init__(self, connection, query, state): pass @connresource.guarded def get_name(self) -> str: '''Return the name of this prepared statement. .. versionadded:: 0.25.0 ''' pass @connresource.guarded def get_query(self) -> str: '''Return the text of the query for this prepared statement. Example:: stmt = await connection.prepare('SELECT $1::int') assert stmt.get_query() == "SELECT $1::int" ''' pass @connresource.guarded def get_statusmsg(self) -> str: '''Return the status of the executed command. Example:: stmt = await connection.prepare('CREATE TABLE mytab (a int)') await stmt.fetch() assert stmt.get_statusmsg() == "CREATE TABLE" ''' pass @connresource.guarded def get_parameters(self): '''Return a description of statement parameters types. :return: A tuple of :class:`asyncpg.types.Type`. Example:: stmt = await connection.prepare('SELECT ($1::int, $2::text)') print(stmt.get_parameters()) # Will print: # (Type(oid=23, name='int4', kind='scalar', schema='pg_catalog'), # Type(oid=25, name='text', kind='scalar', schema='pg_catalog')) ''' pass @connresource.guarded def get_attributes(self): '''Return a description of relation attributes (columns). :return: A tuple of :class:`asyncpg.types.Attribute`. Example:: st = await self.con.prepare(''' SELECT typname, typnamespace FROM pg_type ''') print(st.get_attributes()) # Will print: # (Attribute( # name='typname', # type=Type(oid=19, name='name', kind='scalar', # schema='pg_catalog')), # Attribute( # name='typnamespace', # type=Type(oid=26, name='oid', kind='scalar', # schema='pg_catalog'))) ''' pass @connresource.guarded def cursor(self, *args, prefetch=None, timeout=None) -> cursor.CursorFactory: '''Return a *cursor factory* for the prepared statement. :param args: Query arguments. :param int prefetch: The number of rows the *cursor iterator* will prefetch (defaults to ``50``.) :param float timeout: Optional timeout in seconds. :return: A :class:`~cursor.CursorFactory` object. ''' pass @connresource.guarded async def explain(self, *args, analyze=False): '''Return the execution plan of the statement. :param args: Query arguments. :param analyze: If ``True``, the statement will be executed and the run time statitics added to the return value. :return: An object representing the execution plan. This value is actually a deserialized JSON output of the SQL ``EXPLAIN`` command. ''' pass @connresource.guarded async def fetch(self, *args, timeout=None): '''Execute the statement and return a list of :class:`Record` objects. :param str query: Query text :param args: Query arguments :param float timeout: Optional timeout value in seconds. :return: A list of :class:`Record` instances. ''' pass @connresource.guarded async def fetchval(self, *args, column=0, timeout=None): '''Execute the statement and return a value in the first row. :param args: Query arguments. :param int column: Numeric index within the record of the value to return (defaults to 0). :param float timeout: Optional timeout value in seconds. If not specified, defaults to the value of ``command_timeout`` argument to the ``Connection`` instance constructor. :return: The value of the specified column of the first record. ''' pass @connresource.guarded async def fetchrow(self, *args, timeout=None): '''Execute the statement and return the first row. :param str query: Query text :param args: Query arguments :param float timeout: Optional timeout value in seconds. :return: The first row as a :class:`Record` instance. ''' pass @connresource.guarded async def fetchmany(self, args, *, timeout=None): '''Execute the statement and return a list of :class:`Record` objects. :param args: Query arguments. :param float timeout: Optional timeout value in seconds. :return: A list of :class:`Record` instances. .. versionadded:: 0.30.0 ''' pass @connresource.guarded async def executemany(self, args, *, timeout: float=None): '''Execute the statement for each sequence of arguments in *args*. :param args: An iterable containing sequences of arguments. :param float timeout: Optional timeout value in seconds. :return None: This method discards the results of the operations. .. versionadded:: 0.22.0 ''' pass async def __do_execute(self, executor): pass async def __bind_execute(self, args, limit, timeout): pass def _check_open(self, meth_name): pass def _check_conn_validity(self, meth_name): pass def __del__(self): pass
31
13
13
2
6
6
1
0.9
1
3
0
0
18
3
18
20
271
49
117
44
85
105
74
31
55
3
1
2
25
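The PreparedStatement record above documents a small API surface: prepare a statement once, then reuse it via get_query()/get_statusmsg()/fetchval(), or hand it to cursor() and explain(). A minimal, hedged usage sketch follows; the DSN is a hypothetical placeholder and a reachable PostgreSQL server is assumed.

import asyncio
import asyncpg

async def main():
    # Placeholder DSN; substitute real connection parameters.
    con = await asyncpg.connect('postgresql://localhost/postgres')
    stmt = await con.prepare('SELECT $1::int + 1')
    print(stmt.get_query())          # SELECT $1::int + 1
    print(await stmt.fetchval(41))   # 42
    print(stmt.get_statusmsg())      # SELECT 1
    print(await stmt.explain(41))    # deserialized JSON EXPLAIN plan
    await con.close()

asyncio.run(main())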
148,510
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/pool.py
asyncpg.pool.PoolConnectionProxyMeta
class PoolConnectionProxyMeta(type): def __new__( mcls, name: str, bases: tuple[Type[Any], ...], dct: dict[str, Any], *, wrap: bool = False, ) -> PoolConnectionProxyMeta: if wrap: for attrname in dir(connection.Connection): if attrname.startswith('_') or attrname in dct: continue meth = getattr(connection.Connection, attrname) if not inspect.isfunction(meth): continue iscoroutine = inspect.iscoroutinefunction(meth) wrapper = mcls._wrap_connection_method(attrname, iscoroutine) wrapper = functools.update_wrapper(wrapper, meth) dct[attrname] = wrapper if '__doc__' not in dct: dct['__doc__'] = connection.Connection.__doc__ return super().__new__(mcls, name, bases, dct) @staticmethod def _wrap_connection_method( meth_name: str, iscoroutine: bool ) -> Callable[..., Any]: def call_con_method(self: Any, *args: Any, **kwargs: Any) -> Any: # This method will be owned by PoolConnectionProxy class. if self._con is None: raise exceptions.InterfaceError( 'cannot call Connection.{}(): ' 'connection has been released back to the pool'.format( meth_name)) meth = getattr(self._con.__class__, meth_name) return meth(self._con, *args, **kwargs) if iscoroutine: compat.markcoroutinefunction(call_con_method) return call_con_method
class PoolConnectionProxyMeta(type): def __new__( mcls, name: str, bases: tuple[Type[Any], ...], dct: dict[str, Any], *, wrap: bool = False, ) -> PoolConnectionProxyMeta: pass @staticmethod def _wrap_connection_method( meth_name: str, iscoroutine: bool ) -> Callable[..., Any]: pass def call_con_method(self: Any, *args: Any, **kwargs: Any) -> Any: pass
5
0
18
3
15
1
3
0.03
1
8
1
1
1
0
2
15
48
9
38
19
24
1
25
9
21
6
2
3
10
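The wrapping trick in PoolConnectionProxyMeta is generic: at class-creation time, copy every public method of a target class onto the proxy class, each wrapper forwarding to a wrapped instance and failing loudly once the proxy is detached. A self-contained reduction of the technique (not asyncpg's actual code; Target and Proxy are illustrative names):

import functools
import inspect

class Target:
    def greet(self, name):
        return 'hello ' + name

class ProxyMeta(type):
    def __new__(mcls, name, bases, dct, *, wrap=False):
        if wrap:
            for attrname in dir(Target):
                if attrname.startswith('_') or attrname in dct:
                    continue
                meth = getattr(Target, attrname)
                if not inspect.isfunction(meth):
                    continue

                def make_wrapper(meth_name):
                    # Factory avoids the late-binding closure pitfall.
                    def call(self, *args, **kwargs):
                        if self._obj is None:
                            raise RuntimeError('proxy detached')
                        return getattr(self._obj, meth_name)(*args, **kwargs)
                    return call

                dct[attrname] = functools.update_wrapper(
                    make_wrapper(attrname), meth)
        return super().__new__(mcls, name, bases, dct)

class Proxy(metaclass=ProxyMeta, wrap=True):
    def __init__(self, obj):
        self._obj = obj

print(Proxy(Target()).greet('world'))  # hello world

Because the wrappers are created once at class definition, per-call overhead is a single attribute lookup plus the liveness check, which is why the real pool proxy uses this over a pure __getattr__ approach for known methods.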
148,511
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/pool.py
asyncpg.pool.PoolConnectionProxy
class PoolConnectionProxy(connection._ConnectionProxy, metaclass=PoolConnectionProxyMeta, wrap=True): __slots__ = ('_con', '_holder') def __init__( self, holder: PoolConnectionHolder, con: connection.Connection ) -> None: self._con = con self._holder = holder con._set_proxy(self) def __getattr__(self, attr: str) -> Any: # Proxy all unresolved attributes to the wrapped Connection object. return getattr(self._con, attr) def _detach(self) -> Optional[connection.Connection]: if self._con is None: return con, self._con = self._con, None con._set_proxy(None) return con def __repr__(self) -> str: if self._con is None: return '<{classname} [released] {id:#x}>'.format( classname=self.__class__.__name__, id=id(self)) else: return '<{classname} {con!r} {id:#x}>'.format( classname=self.__class__.__name__, con=self._con, id=id(self))
class PoolConnectionProxy(connection._ConnectionProxy, metaclass=PoolConnectionProxyMeta, wrap=True): def __init__( self, holder: PoolConnectionHolder, con: connection.Connection ) -> None: pass def __getattr__(self, attr: str) -> Any: pass def _detach(self) -> Optional[connection.Connection]: pass def __repr__(self) -> str: pass
5
0
6
0
5
1
2
0.12
3
4
2
0
4
2
4
19
32
6
25
13
16
3
18
9
13
2
3
1
6
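PoolConnectionProxy combines those metaclass-generated wrappers with a __getattr__ fallback for anything not copied, and _detach() severs the link so later access fails. A toy reduction of just the fallback mechanism (illustrative names, not asyncpg's code):

class Wrapped:
    server_version = (16, 0)

class Proxy:
    def __init__(self, wrapped):
        self._obj = wrapped

    def __getattr__(self, attr):
        # Called only for attributes not found through normal lookup.
        return getattr(self._obj, attr)

    def detach(self):
        obj, self._obj = self._obj, None
        return obj

p = Proxy(Wrapped())
print(p.server_version)   # (16, 0), delegated to the wrapped object
p.detach()
try:
    p.server_version
except AttributeError as e:
    print('detached:', e)  # lookup on None now fails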
148,512
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/pool.py
asyncpg.pool.PoolConnectionHolder
class PoolConnectionHolder: __slots__ = ('_con', '_pool', '_loop', '_proxy', '_max_queries', '_setup', '_max_inactive_time', '_in_use', '_inactive_callback', '_timeout', '_generation') def __init__( self, pool: "Pool", *, max_queries: float, setup: Optional[Callable[[PoolConnectionProxy], Awaitable[None]]], max_inactive_time: float, ) -> None: self._pool = pool self._con: Optional[connection.Connection] = None self._proxy: Optional[PoolConnectionProxy] = None self._max_queries = max_queries self._max_inactive_time = max_inactive_time self._setup = setup self._inactive_callback: Optional[Callable] = None self._in_use: Optional[asyncio.Future] = None self._timeout: Optional[float] = None self._generation: Optional[int] = None def is_connected(self) -> bool: return self._con is not None and not self._con.is_closed() def is_idle(self) -> bool: return not self._in_use async def connect(self) -> None: if self._con is not None: raise exceptions.InternalClientError( 'PoolConnectionHolder.connect() called while another ' 'connection already exists') self._con = await self._pool._get_new_connection() self._generation = self._pool._generation self._maybe_cancel_inactive_callback() self._setup_inactive_callback() async def acquire(self) -> PoolConnectionProxy: if self._con is None or self._con.is_closed(): self._con = None await self.connect() elif self._generation != self._pool._generation: # Connections have been expired, re-connect the holder. self._pool._loop.create_task( self._con.close(timeout=self._timeout)) self._con = None await self.connect() self._maybe_cancel_inactive_callback() self._proxy = proxy = PoolConnectionProxy(self, self._con) if self._setup is not None: try: await self._setup(proxy) except (Exception, asyncio.CancelledError) as ex: # If a user-defined `setup` function fails, we don't # know if the connection is safe for re-use, hence # we close it. A new connection will be created # when `acquire` is called again. try: # Use `close()` to close the connection gracefully. # An exception in `setup` isn't necessarily caused # by an IO or a protocol error. close() will # do the necessary cleanup via _release_on_close(). await self._con.close() finally: raise ex self._in_use = self._pool._loop.create_future() return proxy async def release(self, timeout: Optional[float]) -> None: if self._in_use is None: raise exceptions.InternalClientError( 'PoolConnectionHolder.release() called on ' 'a free connection holder') if self._con.is_closed(): # When closing, pool connections perform the necessary # cleanup, so we don't have to do anything else here. return self._timeout = None if self._con._protocol.queries_count >= self._max_queries: # The connection has reached its maximum utilization limit, # so close it. Connection.close() will call _release(). await self._con.close(timeout=timeout) return if self._generation != self._pool._generation: # The connection has expired because it belongs to # an older generation (Pool.expire_connections() has # been called.) 
await self._con.close(timeout=timeout) return try: budget = timeout if self._con._protocol._is_cancelling(): # If the connection is in cancellation state, # wait for the cancellation started = time.monotonic() await compat.wait_for( self._con._protocol._wait_for_cancellation(), budget) if budget is not None: budget -= time.monotonic() - started if self._pool._reset is not None: async with compat.timeout(budget): await self._con._reset() await self._pool._reset(self._con) else: await self._con.reset(timeout=budget) except (Exception, asyncio.CancelledError) as ex: # If the `reset` call failed, terminate the connection. # A new one will be created when `acquire` is called # again. try: # An exception in `reset` is most likely caused by # an IO error, so terminate the connection. self._con.terminate() finally: raise ex # Free this connection holder and invalidate the # connection proxy. self._release() # Rearm the connection inactivity timer. self._setup_inactive_callback() async def wait_until_released(self) -> None: if self._in_use is None: return else: await self._in_use async def close(self) -> None: if self._con is not None: # Connection.close() will call _release_on_close() to # finish holder cleanup. await self._con.close() def terminate(self) -> None: if self._con is not None: # Connection.terminate() will call _release_on_close() to # finish holder cleanup. self._con.terminate() def _setup_inactive_callback(self) -> None: if self._inactive_callback is not None: raise exceptions.InternalClientError( 'pool connection inactivity timer already exists') if self._max_inactive_time: self._inactive_callback = self._pool._loop.call_later( self._max_inactive_time, self._deactivate_inactive_connection) def _maybe_cancel_inactive_callback(self) -> None: if self._inactive_callback is not None: self._inactive_callback.cancel() self._inactive_callback = None def _deactivate_inactive_connection(self) -> None: if self._in_use is not None: raise exceptions.InternalClientError( 'attempting to deactivate an acquired connection') if self._con is not None: # The connection is idle and not in use, so it's fine to # use terminate() instead of close(). self._con.terminate() # Must call _release_on_close(), because # _deactivate_inactive_connection() is called when the # connection is *not* checked out, and so terminate() # above will not call the below. self._release_on_close() def _release_on_close(self) -> None: self._maybe_cancel_inactive_callback() self._release() self._con = None def _release(self) -> None: """Release this connection holder.""" if self._in_use is None: # The holder is not checked out. return if not self._in_use.done(): self._in_use.set_result(None) self._in_use = None # Deinitialize the connection proxy. All subsequent # operations on it will fail. if self._proxy is not None: self._proxy._detach() self._proxy = None # Put ourselves back to the pool queue. self._pool._queue.put_nowait(self)
class PoolConnectionHolder: def __init__( self, pool: "Pool", *, max_queries: float, setup: Optional[Callable[[PoolConnectionProxy], Awaitable[None]]], max_inactive_time: float, ) -> None: pass def is_connected(self) -> bool: pass def is_idle(self) -> bool: pass async def connect(self) -> None: pass async def acquire(self) -> PoolConnectionProxy: pass async def release(self, timeout: Optional[float]) -> None: pass async def wait_until_released(self) -> None: pass async def close(self) -> None: pass def terminate(self) -> None: pass def _setup_inactive_callback(self) -> None: pass def _maybe_cancel_inactive_callback(self) -> None: pass def _deactivate_inactive_connection(self) -> None: pass def _release_on_close(self) -> None: pass def _release(self) -> None: '''Release this connection holder.''' pass
15
1
14
2
9
3
3
0.29
0
8
2
0
14
10
14
14
215
38
137
38
115
40
111
28
96
9
0
3
38
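The core lifecycle idea in PoolConnectionHolder — arm an inactivity timer with loop.call_later() when the connection is released, cancel it on acquire, and drop the connection if the timer ever fires — reduces to a few lines. A self-contained asyncio sketch (Holder is an illustrative stand-in, not asyncpg's class):

import asyncio

class Holder:
    def __init__(self, max_inactive_time):
        self._max_inactive_time = max_inactive_time
        self._timer = None
        self._resource = object()

    def _arm(self):
        loop = asyncio.get_running_loop()
        self._timer = loop.call_later(self._max_inactive_time, self._expire)

    def _disarm(self):
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None

    def _expire(self):
        # Fires only if the resource stayed idle past the deadline.
        print('idle too long, dropping resource')
        self._resource = None

    def acquire(self):
        self._disarm()
        return self._resource

    def release(self):
        self._arm()

async def main():
    h = Holder(0.1)
    h.release()          # resource goes idle; timer armed
    await asyncio.sleep(0.2)
    print(h.acquire())   # None -- the timer expired it

asyncio.run(main())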
148,513
MagicStack/asyncpg
MagicStack_asyncpg/asyncpg/pool.py
asyncpg.pool.PoolAcquireContext
class PoolAcquireContext: __slots__ = ('timeout', 'connection', 'done', 'pool') def __init__(self, pool: Pool, timeout: Optional[float]) -> None: self.pool = pool self.timeout = timeout self.connection = None self.done = False async def __aenter__(self): if self.connection is not None or self.done: raise exceptions.InterfaceError('a connection is already acquired') self.connection = await self.pool._acquire(self.timeout) return self.connection async def __aexit__( self, exc_type: Optional[Type[BaseException]] = None, exc_val: Optional[BaseException] = None, exc_tb: Optional[TracebackType] = None, ) -> None: self.done = True con = self.connection self.connection = None await self.pool.release(con) def __await__(self): self.done = True return self.pool._acquire(self.timeout).__await__()
class PoolAcquireContext: def __init__(self, pool: Pool, timeout: Optional[float]) -> None: pass async def __aenter__(self): pass async def __aexit__( self, exc_type: Optional[Type[BaseException]] = None, exc_val: Optional[BaseException] = None, exc_tb: Optional[TracebackType] = None, ) -> None: pass def __await__(self): pass
5
0
6
0
6
0
1
0
0
3
1
0
4
4
4
4
30
5
25
16
15
0
20
11
15
2
0
1
5
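PoolAcquireContext is the standard dual-interface pattern: the same object is awaitable (con = await pool.acquire()) and an async context manager (async with pool.acquire() as con:). A self-contained reduction with a stand-in pool (FakePool and AcquireContext are illustrative, not asyncpg's classes):

import asyncio

class FakePool:
    async def _acquire(self):
        return 'connection'

    async def release(self, con):
        print('released', con)

class AcquireContext:
    def __init__(self, pool):
        self.pool = pool
        self.connection = None

    async def __aenter__(self):
        self.connection = await self.pool._acquire()
        return self.connection

    async def __aexit__(self, *exc):
        con, self.connection = self.connection, None
        await self.pool.release(con)

    def __await__(self):
        # Makes `await ctx` behave like a direct acquire.
        return self.pool._acquire().__await__()

async def main():
    pool = FakePool()
    con = await AcquireContext(pool)       # plain-await form
    print(con)
    async with AcquireContext(pool) as c:  # context-manager form
        print(c)

asyncio.run(main())

The context-manager form is what guarantees release even on exceptions; the plain-await form leaves release to the caller.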
148,514
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_listeners.py
tests.test_listeners.TestListeners
class TestListeners(tb.ClusterTestCase): async def test_listen_01(self): async with self.create_pool(database='postgres') as pool: async with pool.acquire() as con: q1 = asyncio.Queue() q2 = asyncio.Queue() q3 = asyncio.Queue() def listener1(*args): q1.put_nowait(args) def listener2(*args): q2.put_nowait(args) async def async_listener3(*args): q3.put_nowait(args) await con.add_listener('test', listener1) await con.add_listener('test', listener2) await con.add_listener('test', async_listener3) await con.execute("NOTIFY test, 'aaaa'") self.assertEqual( await q1.get(), (con, con.get_server_pid(), 'test', 'aaaa')) self.assertEqual( await q2.get(), (con, con.get_server_pid(), 'test', 'aaaa')) self.assertEqual( await q3.get(), (con, con.get_server_pid(), 'test', 'aaaa')) await con.remove_listener('test', listener2) await con.remove_listener('test', async_listener3) await con.execute("NOTIFY test, 'aaaa'") self.assertEqual( await q1.get(), (con, con.get_server_pid(), 'test', 'aaaa')) with self.assertRaises(asyncio.TimeoutError): await asyncio.wait_for(q2.get(), timeout=0.05) await con.reset() await con.remove_listener('test', listener1) await con.execute("NOTIFY test, 'aaaa'") with self.assertRaises(asyncio.TimeoutError): await asyncio.wait_for(q1.get(), timeout=0.05) with self.assertRaises(asyncio.TimeoutError): await asyncio.wait_for(q2.get(), timeout=0.05) async def test_listen_02(self): async with self.create_pool(database='postgres') as pool: async with pool.acquire() as con1, pool.acquire() as con2: q1 = asyncio.Queue() def listener1(*args): q1.put_nowait(args) await con1.add_listener('ipc', listener1) await con2.execute("NOTIFY ipc, 'hello'") self.assertEqual( await q1.get(), (con1, con2.get_server_pid(), 'ipc', 'hello')) await con1.remove_listener('ipc', listener1) async def test_listen_notletters(self): async with self.create_pool(database='postgres') as pool: async with pool.acquire() as con1, pool.acquire() as con2: q1 = asyncio.Queue() def listener1(*args): q1.put_nowait(args) await con1.add_listener('12+"34', listener1) await con2.execute("""NOTIFY "12+""34", 'hello'""") self.assertEqual( await q1.get(), (con1, con2.get_server_pid(), '12+"34', 'hello')) await con1.remove_listener('12+"34', listener1) async def test_dangling_listener_warns(self): async with self.create_pool(database='postgres') as pool: with self.assertWarnsRegex( exceptions.InterfaceWarning, '.*Connection.*is being released to the pool but ' 'has 1 active notification listener'): async with pool.acquire() as con: def listener1(*args): pass await con.add_listener('ipc', listener1)
class TestListeners(tb.ClusterTestCase): async def test_listen_01(self): pass def listener1(*args): pass def listener2(*args): pass async def async_listener3(*args): pass async def test_listen_02(self): pass def listener1(*args): pass async def test_listen_notletters(self): pass def listener1(*args): pass async def test_dangling_listener_warns(self): pass def listener1(*args): pass
11
0
11
2
9
0
1
0
1
1
0
0
4
0
4
4
102
27
75
24
64
0
60
16
49
1
1
3
10
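A hedged sketch of the LISTEN/NOTIFY API these tests exercise: listener callbacks receive (connection, sender_pid, channel, payload), matching the tuples asserted above. The DSN is a placeholder and a live PostgreSQL server is assumed.

import asyncio
import asyncpg

async def main():
    con = await asyncpg.connect('postgresql://localhost/postgres')
    got = asyncio.Queue()

    def on_notify(connection, pid, channel, payload):
        got.put_nowait((channel, payload))

    await con.add_listener('test', on_notify)
    await con.execute("NOTIFY test, 'hello'")
    print(await got.get())   # ('test', 'hello')
    await con.remove_listener('test', on_notify)
    await con.close()

asyncio.run(main())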
148,515
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_logging.py
tests.test_logging.LogCollector
class LogCollector: def __init__(self): self.records = [] def __call__(self, record): self.records.append(record)
class LogCollector: def __init__(self): pass def __call__(self, record): pass
3
0
2
0
2
0
1
0
0
0
0
0
2
1
2
2
6
1
5
4
2
0
5
4
2
1
0
0
2
148,516
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_logging.py
tests.test_logging.TestQueryLogging
class TestQueryLogging(tb.ConnectedTestCase): async def test_logging_context(self): queries = asyncio.Queue() def query_saver(record): queries.put_nowait(record) log = LogCollector() with self.con.query_logger(query_saver): self.assertEqual(len(self.con._query_loggers), 1) await self.con.execute("SELECT 1") with self.con.query_logger(log): self.assertEqual(len(self.con._query_loggers), 2) await self.con.execute("SELECT 2") r1 = await queries.get() r2 = await queries.get() self.assertEqual(r1.query, "SELECT 1") self.assertEqual(r2.query, "SELECT 2") self.assertEqual(len(log.records), 1) self.assertEqual(log.records[0].query, "SELECT 2") self.assertEqual(len(self.con._query_loggers), 0) async def test_error_logging(self): log = LogCollector() with self.con.query_logger(log): with self.assertRaises(exceptions.UndefinedColumnError): await self.con.execute("SELECT x") await asyncio.sleep(0) # wait for logging self.assertEqual(len(log.records), 1) self.assertEqual( type(log.records[0].exception), exceptions.UndefinedColumnError )
class TestQueryLogging(tb.ConnectedTestCase): async def test_logging_context(self): pass def query_saver(record): pass async def test_error_logging(self): pass
4
0
12
2
10
0
1
0.03
1
3
2
0
2
0
2
2
37
7
30
9
26
1
27
9
23
1
1
2
3
148,517
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_pool.py
tests.test_pool.SlowCancelConnection
class SlowCancelConnection(pg_connection.Connection): """Connection class to simulate races with Connection._cancel().""" async def _cancel(self, waiter): await asyncio.sleep(0.2) return await super()._cancel(waiter)
class SlowCancelConnection(pg_connection.Connection): '''Connection class to simulate races with Connection._cancel().''' async def _cancel(self, waiter): pass
2
1
3
0
3
0
1
0.25
1
1
0
0
1
0
1
87
5
0
4
2
2
1
4
2
2
1
4
0
1
148,518
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_pool.py
tests.test_pool.SlowResetConnection
class SlowResetConnection(pg_connection.Connection): """Connection class to simulate races with Connection.reset().""" async def reset(self, *, timeout=None): await asyncio.sleep(0.2) return await super().reset(timeout=timeout)
class SlowResetConnection(pg_connection.Connection): '''Connection class to simulate races with Connection.reset().''' async def reset(self, *, timeout=None): pass
2
1
3
0
3
0
1
0.25
1
1
0
0
1
0
1
87
5
0
4
2
2
1
4
2
2
1
4
0
1
148,519
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_pool.py
tests.test_pool.TestHotStandby
class TestHotStandby(tb.HotStandbyTestCase): def create_pool(self, **kwargs): conn_spec = self.standby_cluster.get_connection_spec() conn_spec.update(kwargs) return pg_pool.create_pool(loop=self.loop, **conn_spec) async def test_standby_pool_01(self): for n in {1, 3, 5, 10, 20, 100}: with self.subTest(tasksnum=n): pool = await self.create_pool( database='postgres', user='postgres', min_size=5, max_size=10) async def worker(): con = await pool.acquire() self.assertEqual(await con.fetchval('SELECT 1'), 1) await pool.release(con) tasks = [worker() for _ in range(n)] await asyncio.gather(*tasks) await pool.close() async def test_standby_cursors(self): con = await self.standby_cluster.connect( database='postgres', user='postgres', loop=self.loop) try: async with con.transaction(): cursor = await con.cursor('SELECT 1') self.assertEqual(await cursor.fetchrow(), (1,)) finally: await con.close()
class TestHotStandby(tb.HotStandbyTestCase): def create_pool(self, **kwargs): pass async def test_standby_pool_01(self): pass async def worker(): pass async def test_standby_cursors(self): pass
5
0
8
1
8
0
1
0
1
1
0
0
3
0
3
3
32
5
27
12
22
0
23
12
18
2
1
2
5
148,520
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_pool.py
tests.test_pool.TestPool
class TestPool(tb.ConnectedTestCase): async def test_pool_01(self): for n in {1, 5, 10, 20, 100}: with self.subTest(tasksnum=n): pool = await self.create_pool(database='postgres', min_size=5, max_size=10) async def worker(): con = await pool.acquire() self.assertEqual(await con.fetchval('SELECT 1'), 1) await pool.release(con) tasks = [worker() for _ in range(n)] await asyncio.gather(*tasks) await pool.close() async def test_pool_02(self): for n in {1, 3, 5, 10, 20, 100}: with self.subTest(tasksnum=n): async with self.create_pool(database='postgres', min_size=5, max_size=5) as pool: async def worker(): con = await pool.acquire(timeout=5) self.assertEqual(await con.fetchval('SELECT 1'), 1) await pool.release(con) tasks = [worker() for _ in range(n)] await asyncio.gather(*tasks) async def test_pool_03(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) con = await pool.acquire(timeout=1) with self.assertRaises(asyncio.TimeoutError): await pool.acquire(timeout=0.03) pool.terminate() del con async def test_pool_04(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) con = await pool.acquire(timeout=POOL_NOMINAL_TIMEOUT) # Manual termination of pool connections releases the # pool item immediately. con.terminate() self.assertIsNone(pool._holders[0]._con) self.assertIsNone(pool._holders[0]._in_use) con = await pool.acquire(timeout=POOL_NOMINAL_TIMEOUT) self.assertEqual(await con.fetchval('SELECT 1'), 1) await con.close() self.assertIsNone(pool._holders[0]._con) self.assertIsNone(pool._holders[0]._in_use) # Calling release should not hurt. await pool.release(con) pool.terminate() async def test_pool_05(self): for n in {1, 3, 5, 10, 20, 100}: with self.subTest(tasksnum=n): pool = await self.create_pool(database='postgres', min_size=5, max_size=10) async def worker(): async with pool.acquire() as con: self.assertEqual(await con.fetchval('SELECT 1'), 1) tasks = [worker() for _ in range(n)] await asyncio.gather(*tasks) await pool.close() async def test_pool_06(self): fut = asyncio.Future() async def setup(con): fut.set_result(con) async with self.create_pool(database='postgres', min_size=5, max_size=5, setup=setup) as pool: async with pool.acquire() as con: pass self.assertIs(con, await fut) async def test_pool_07(self): cons = set() connect_called = 0 init_called = 0 setup_called = 0 reset_called = 0 async def connect(*args, **kwargs): nonlocal connect_called connect_called += 1 return await pg_connection.connect(*args, **kwargs) async def setup(con): nonlocal setup_called if con._con not in cons: # `con` is `PoolConnectionProxy`. raise RuntimeError('init was not called before setup') setup_called += 1 async def init(con): nonlocal init_called if con in cons: raise RuntimeError('init was called more than once') cons.add(con) init_called += 1 async def reset(con): nonlocal reset_called reset_called += 1 async def user(pool): async with pool.acquire() as con: if con._con not in cons: # `con` is `PoolConnectionProxy`. 
raise RuntimeError('init was not called') async with self.create_pool(database='postgres', min_size=2, max_size=5, connect=connect, init=init, setup=setup, reset=reset) as pool: users = asyncio.gather(*[user(pool) for _ in range(10)]) await users self.assertEqual(len(cons), 5) self.assertEqual(connect_called, 5) self.assertEqual(init_called, 5) self.assertEqual(setup_called, 10) self.assertEqual(reset_called, 10) async def bad_connect(*args, **kwargs): return 1 with self.assertRaisesRegex( asyncpg.InterfaceError, "expected pool connect callback to return an instance of " "'asyncpg\\.connection\\.Connection', got 'int'" ): await self.create_pool(database='postgres', connect=bad_connect) async def test_pool_08(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) con = await pool.acquire(timeout=POOL_NOMINAL_TIMEOUT) with self.assertRaisesRegex(asyncpg.InterfaceError, 'is not a member'): await pool.release(con._con) async def test_pool_09(self): pool1 = await self.create_pool(database='postgres', min_size=1, max_size=1) pool2 = await self.create_pool(database='postgres', min_size=1, max_size=1) try: con = await pool1.acquire(timeout=POOL_NOMINAL_TIMEOUT) with self.assertRaisesRegex(asyncpg.InterfaceError, 'is not a member'): await pool2.release(con) finally: await pool1.release(con) await pool1.close() await pool2.close() async def test_pool_10(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) con = await pool.acquire() await pool.release(con) await pool.release(con) await pool.close() async def test_pool_11(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) async with pool.acquire() as con: self.assertIn(repr(con._con), repr(con)) # Test __repr__. ps = await con.prepare('SELECT 1') txn = con.transaction() async with con.transaction(): cur = await con.cursor('SELECT 1') ps_cur = await ps.cursor() self.assertIn('[released]', repr(con)) with self.assertRaisesRegex( asyncpg.InterfaceError, r'cannot call Connection\.execute.*released back to the pool'): con.execute('select 1') for meth in ('fetchval', 'fetchrow', 'fetch', 'explain', 'get_query', 'get_statusmsg', 'get_parameters', 'get_attributes'): with self.assertRaisesRegex( asyncpg.InterfaceError, r'cannot call PreparedStatement\.{meth}.*released ' r'back to the pool'.format(meth=meth)): getattr(ps, meth)() for c in (cur, ps_cur): for meth in ('fetch', 'fetchrow'): with self.assertRaisesRegex( asyncpg.InterfaceError, r'cannot call Cursor\.{meth}.*released ' r'back to the pool'.format(meth=meth)): getattr(c, meth)() with self.assertRaisesRegex( asyncpg.InterfaceError, r'cannot call Cursor\.forward.*released ' r'back to the pool'): c.forward(1) for meth in ('start', 'commit', 'rollback'): with self.assertRaisesRegex( asyncpg.InterfaceError, r'cannot call Transaction\.{meth}.*released ' r'back to the pool'.format(meth=meth)): getattr(txn, meth)() await pool.close() async def test_pool_12(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) async with pool.acquire() as con: self.assertTrue(isinstance(con, pg_connection.Connection)) self.assertFalse(isinstance(con, list)) await pool.close() async def test_pool_13(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) async with pool.acquire() as con: self.assertIn('Execute an SQL command', con.execute.__doc__) self.assertEqual(con.execute.__name__, 'execute') self.assertIn( str(inspect.signature(con.execute))[1:], 
str(inspect.signature(pg_connection.Connection.execute))) await pool.close() def test_pool_init_run_until_complete(self): pool_init = self.create_pool(database='postgres') pool = self.loop.run_until_complete(pool_init) self.assertIsInstance(pool, asyncpg.pool.Pool) async def test_pool_exception_in_setup_and_init(self): class Error(Exception): pass async def setup(con): nonlocal setup_calls, last_con last_con = con setup_calls += 1 if setup_calls > 1: cons.append(con) else: cons.append('error') raise Error with self.subTest(method='setup'): setup_calls = 0 last_con = None cons = [] async with self.create_pool(database='postgres', min_size=1, max_size=1, setup=setup) as pool: with self.assertRaises(Error): await pool.acquire() self.assertTrue(last_con.is_closed()) async with pool.acquire() as con: self.assertEqual(cons, ['error', con]) with self.subTest(method='init'): setup_calls = 0 last_con = None cons = [] async with self.create_pool(database='postgres', min_size=0, max_size=1, init=setup) as pool: with self.assertRaises(Error): await pool.acquire() self.assertTrue(last_con.is_closed()) async with pool.acquire() as con: self.assertEqual(await con.fetchval('select 1::int'), 1) self.assertEqual(cons, ['error', con._con]) async def test_pool_auth(self): if not self.cluster.is_managed(): self.skipTest('unmanaged cluster') self.cluster.reset_hba() if _system != 'Windows': self.cluster.add_hba_entry( type='local', database='postgres', user='pooluser', auth_method='md5') self.cluster.add_hba_entry( type='host', address='127.0.0.1/32', database='postgres', user='pooluser', auth_method='md5') self.cluster.add_hba_entry( type='host', address='::1/128', database='postgres', user='pooluser', auth_method='md5') self.cluster.reload() try: await self.con.execute(''' CREATE ROLE pooluser WITH LOGIN PASSWORD 'poolpassword' ''') pool = await self.create_pool(database='postgres', user='pooluser', password='poolpassword', min_size=5, max_size=10) async def worker(): con = await pool.acquire() self.assertEqual(await con.fetchval('SELECT 1'), 1) await pool.release(con) tasks = [worker() for _ in range(5)] await asyncio.gather(*tasks) await pool.close() finally: await self.con.execute('DROP ROLE pooluser') # Reset cluster's pg_hba.conf since we've meddled with it self.cluster.trust_local_connections() self.cluster.reload() async def test_pool_handles_task_cancel_in_acquire_with_timeout(self): # See https://github.com/MagicStack/asyncpg/issues/547 pool = await self.create_pool(database='postgres', min_size=1, max_size=1) async def worker(): async with pool.acquire(timeout=100): pass # Schedule task task = self.loop.create_task(worker()) # Yield to task, but cancel almost immediately await asyncio.sleep(0.00000000001) # Cancel the worker. task.cancel() # Wait to make sure the cleanup has completed. await asyncio.sleep(0.4) # Check that the connection has been returned to the pool. self.assertEqual(pool._queue.qsize(), 1) async def test_pool_handles_task_cancel_in_release(self): # Use SlowResetConnectionPool to simulate # the Task.cancel() and __aexit__ race. pool = await self.create_pool(database='postgres', min_size=1, max_size=1, connection_class=SlowResetConnection) async def worker(): async with pool.acquire(): pass task = self.loop.create_task(worker()) # Let the worker() run. await asyncio.sleep(0.1) # Cancel the worker. task.cancel() # Wait to make sure the cleanup has completed. await asyncio.sleep(0.4) # Check that the connection has been returned to the pool. 
self.assertEqual(pool._queue.qsize(), 1) async def test_pool_handles_query_cancel_in_release(self): # Use SlowResetConnectionPool to simulate # the Task.cancel() and __aexit__ race. pool = await self.create_pool(database='postgres', min_size=1, max_size=1, connection_class=SlowCancelConnection) async def worker(): async with pool.acquire() as con: await con.execute('SELECT pg_sleep(10)') task = self.loop.create_task(worker()) # Let the worker() run. await asyncio.sleep(0.1) # Cancel the worker. task.cancel() # Wait to make sure the cleanup has completed. await asyncio.sleep(0.5) # Check that the connection has been returned to the pool. self.assertEqual(pool._queue.qsize(), 1) async def test_pool_no_acquire_deadlock(self): async with self.create_pool(database='postgres', min_size=1, max_size=1, max_queries=1) as pool: async def sleep_and_release(): async with pool.acquire() as con: await con.execute('SELECT pg_sleep(1)') asyncio.ensure_future(sleep_and_release()) await asyncio.sleep(0.5) async with pool.acquire() as con: await con.fetchval('SELECT 1') async def test_pool_config_persistence(self): N = 100 cons = set() class MyConnection(asyncpg.Connection): async def foo(self): return 42 async def fetchval(self, query): res = await super().fetchval(query) return res + 1 async def test(pool): async with pool.acquire() as con: self.assertEqual(await con.fetchval('SELECT 1'), 2) self.assertEqual(await con.foo(), 42) self.assertTrue(isinstance(con, MyConnection)) self.assertEqual(con._con._config.statement_cache_size, 3) cons.add(con) async with self.create_pool( database='postgres', min_size=10, max_size=10, max_queries=1, connection_class=MyConnection, statement_cache_size=3) as pool: await asyncio.gather(*[test(pool) for _ in range(N)]) self.assertEqual(len(cons), N) async def test_pool_release_in_xact(self): """Test that Connection.reset() closes any open transaction.""" async with self.create_pool(database='postgres', min_size=1, max_size=1) as pool: async def get_xact_id(con): return await con.fetchval('select txid_current()') with self.assertLoopErrorHandlerCalled('an active transaction'): async with pool.acquire() as con: real_con = con._con # unwrap PoolConnectionProxy id1 = await get_xact_id(con) tr = con.transaction() self.assertIsNone(con._con._top_xact) await tr.start() self.assertIs(real_con._top_xact, tr) id2 = await get_xact_id(con) self.assertNotEqual(id1, id2) self.assertIsNone(real_con._top_xact) async with pool.acquire() as con: self.assertIs(con._con, real_con) self.assertIsNone(con._con._top_xact) id3 = await get_xact_id(con) self.assertNotEqual(id2, id3) async def test_pool_connection_methods(self): async def test_fetch(pool): i = random.randint(0, 20) await asyncio.sleep(random.random() / 100) r = await pool.fetch('SELECT {}::int'.format(i)) self.assertEqual(r, [(i,)]) return 1 async def test_fetchrow(pool): i = random.randint(0, 20) await asyncio.sleep(random.random() / 100) r = await pool.fetchrow('SELECT {}::int'.format(i)) self.assertEqual(r, (i,)) return 1 async def test_fetchval(pool): i = random.randint(0, 20) await asyncio.sleep(random.random() / 100) r = await pool.fetchval('SELECT {}::int'.format(i)) self.assertEqual(r, i) return 1 async def test_execute(pool): await asyncio.sleep(random.random() / 100) r = await pool.execute('SELECT generate_series(0, 10)') self.assertEqual(r, 'SELECT {}'.format(11)) return 1 async def test_execute_with_arg(pool): i = random.randint(0, 20) await asyncio.sleep(random.random() / 100) r = await pool.execute('SELECT 
generate_series(0, $1)', i) self.assertEqual(r, 'SELECT {}'.format(i + 1)) return 1 async def run(N, meth): async with self.create_pool(database='postgres', min_size=5, max_size=10) as pool: coros = [meth(pool) for _ in range(N)] res = await asyncio.gather(*coros) self.assertEqual(res, [1] * N) methods = [test_fetch, test_fetchrow, test_fetchval, test_execute, test_execute_with_arg] with tb.silence_asyncio_long_exec_warning(): for method in methods: with self.subTest(method=method.__name__): await run(200, method) async def test_pool_connection_execute_many(self): async def worker(pool): await asyncio.sleep(random.random() / 100) await pool.executemany(''' INSERT INTO exmany VALUES($1, $2) ''', [ ('a', 1), ('b', 2), ('c', 3), ('d', 4) ]) return 1 N = 200 async with self.create_pool(database='postgres', min_size=5, max_size=10) as pool: await pool.execute('CREATE TABLE exmany (a text, b int)') try: coros = [worker(pool) for _ in range(N)] res = await asyncio.gather(*coros) self.assertEqual(res, [1] * N) n_rows = await pool.fetchval('SELECT count(*) FROM exmany') self.assertEqual(n_rows, N * 4) finally: await pool.execute('DROP TABLE exmany') async def test_pool_max_inactive_time_01(self): async with self.create_pool( database='postgres', min_size=1, max_size=1, max_inactive_connection_lifetime=0.1) as pool: # Test that it's OK if a query takes longer time to execute # than `max_inactive_connection_lifetime`. con = pool._holders[0]._con for _ in range(3): await pool.execute('SELECT pg_sleep(0.5)') self.assertIs(pool._holders[0]._con, con) self.assertEqual( await pool.execute('SELECT 1::int'), 'SELECT 1') self.assertIs(pool._holders[0]._con, con) async def test_pool_max_inactive_time_02(self): async with self.create_pool( database='postgres', min_size=1, max_size=1, max_inactive_connection_lifetime=0.5) as pool: # Test that we have a new connection after pool not # being used longer than `max_inactive_connection_lifetime`. con = pool._holders[0]._con self.assertEqual( await pool.execute('SELECT 1::int'), 'SELECT 1') self.assertIs(pool._holders[0]._con, con) await asyncio.sleep(1) self.assertIs(pool._holders[0]._con, None) self.assertEqual( await pool.execute('SELECT 1::int'), 'SELECT 1') self.assertIsNot(pool._holders[0]._con, con) async def test_pool_max_inactive_time_03(self): async with self.create_pool( database='postgres', min_size=1, max_size=1, max_inactive_connection_lifetime=1) as pool: # Test that we start counting inactive time *after* # the connection is being released back to the pool. con = pool._holders[0]._con await pool.execute('SELECT pg_sleep(0.5)') await asyncio.sleep(0.6) self.assertIs(pool._holders[0]._con, con) self.assertEqual( await pool.execute('SELECT 1::int'), 'SELECT 1') self.assertIs(pool._holders[0]._con, con) async def test_pool_max_inactive_time_04(self): # Chaos test for max_inactive_connection_lifetime. 
DURATION = 2.0 START = time.monotonic() N = 0 async def worker(pool): nonlocal N await asyncio.sleep(random.random() / 10 + 0.1) async with pool.acquire() as con: if random.random() > 0.5: await con.execute('SELECT pg_sleep({:.2f})'.format( random.random() / 10)) self.assertEqual( await con.fetchval('SELECT 42::int'), 42) if time.monotonic() - START < DURATION: await worker(pool) N += 1 async with self.create_pool( database='postgres', min_size=10, max_size=30, max_inactive_connection_lifetime=0.1) as pool: workers = [worker(pool) for _ in range(50)] await asyncio.gather(*workers) self.assertGreaterEqual(N, 50) async def test_pool_max_inactive_time_05(self): # Test that idle never-acquired connections abide by # the max inactive lifetime. async with self.create_pool( database='postgres', min_size=2, max_size=2, max_inactive_connection_lifetime=0.2) as pool: self.assertIsNotNone(pool._holders[0]._con) self.assertIsNotNone(pool._holders[1]._con) await pool.execute('SELECT pg_sleep(0.3)') await asyncio.sleep(0.3) self.assertIs(pool._holders[0]._con, None) # The connection in the second holder was never used, # but should be closed nonetheless. self.assertIs(pool._holders[1]._con, None) async def test_pool_handles_inactive_connection_errors(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) con = await pool.acquire(timeout=POOL_NOMINAL_TIMEOUT) true_con = con._con await pool.release(con) # we simulate network error by terminating the connection true_con.terminate() # now pool should reopen terminated connection async with pool.acquire(timeout=POOL_NOMINAL_TIMEOUT) as con: self.assertEqual(await con.fetchval('SELECT 1'), 1) await con.close() await pool.close() async def test_pool_size_and_capacity(self): async with self.create_pool( database='postgres', min_size=2, max_size=3, ) as pool: self.assertEqual(pool.get_min_size(), 2) self.assertEqual(pool.get_max_size(), 3) self.assertEqual(pool.get_size(), 2) self.assertEqual(pool.get_idle_size(), 2) async with pool.acquire(): self.assertEqual(pool.get_idle_size(), 1) async with pool.acquire(): self.assertEqual(pool.get_idle_size(), 0) async with pool.acquire(): self.assertEqual(pool.get_size(), 3) self.assertEqual(pool.get_idle_size(), 0) async def test_pool_closing(self): async with self.create_pool() as pool: self.assertFalse(pool.is_closing()) await pool.close() self.assertTrue(pool.is_closing()) async with self.create_pool() as pool: self.assertFalse(pool.is_closing()) pool.terminate() self.assertTrue(pool.is_closing()) async def test_pool_handles_transaction_exit_in_asyncgen_1(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) locals_ = {} exec(textwrap.dedent('''\ async def iterate(con): async with con.transaction(): for record in await con.fetch("SELECT 1"): yield record '''), globals(), locals_) iterate = locals_['iterate'] class MyException(Exception): pass with self.assertRaises(MyException): async with pool.acquire() as con: async for _ in iterate(con): # noqa raise MyException() async def test_pool_handles_transaction_exit_in_asyncgen_2(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) locals_ = {} exec(textwrap.dedent('''\ async def iterate(con): async with con.transaction(): for record in await con.fetch("SELECT 1"): yield record '''), globals(), locals_) iterate = locals_['iterate'] class MyException(Exception): pass with self.assertRaises(MyException): async with pool.acquire() as con: iterator = iterate(con) async for _ in iterator: # noqa 
raise MyException() del iterator async def test_pool_handles_asyncgen_finalization(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) locals_ = {} exec(textwrap.dedent('''\ async def iterate(con): for record in await con.fetch("SELECT 1"): yield record '''), globals(), locals_) iterate = locals_['iterate'] class MyException(Exception): pass with self.assertRaises(MyException): async with pool.acquire() as con: async with con.transaction(): async for _ in iterate(con): # noqa raise MyException() async def test_pool_close_waits_for_release(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) flag = self.loop.create_future() conn_released = False async def worker(): nonlocal conn_released async with pool.acquire() as connection: async with connection.transaction(): flag.set_result(True) await asyncio.sleep(0.1) conn_released = True self.loop.create_task(worker()) await flag await pool.close() self.assertTrue(conn_released) async def test_pool_close_timeout(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) flag = self.loop.create_future() async def worker(): async with pool.acquire(): flag.set_result(True) await asyncio.sleep(0.5) task = self.loop.create_task(worker()) with self.assertRaises(asyncio.TimeoutError): await flag await asyncio.wait_for(pool.close(), timeout=0.1) await task async def test_pool_expire_connections(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) con = await pool.acquire() try: await pool.expire_connections() finally: await pool.release(con) self.assertIsNone(pool._holders[0]._con) await pool.close() async def test_pool_set_connection_args(self): pool = await self.create_pool(database='postgres', min_size=1, max_size=1) # Test that connection is expired on release. con = await pool.acquire() connspec = self.get_connection_spec() try: connspec['server_settings']['application_name'] = \ 'set_conn_args_test' except KeyError: connspec['server_settings'] = { 'application_name': 'set_conn_args_test' } pool.set_connect_args(**connspec) await pool.expire_connections() await pool.release(con) con = await pool.acquire() self.assertEqual(con.get_settings().application_name, 'set_conn_args_test') await pool.release(con) # Test that connection is expired before acquire. 
connspec = self.get_connection_spec() try: connspec['server_settings']['application_name'] = \ 'set_conn_args_test_2' except KeyError: connspec['server_settings'] = { 'application_name': 'set_conn_args_test_2' } pool.set_connect_args(**connspec) await pool.expire_connections() con = await pool.acquire() self.assertEqual(con.get_settings().application_name, 'set_conn_args_test_2') await pool.release(con) await pool.close() async def test_pool_init_race(self): pool = self.create_pool(database='postgres', min_size=1, max_size=1) t1 = asyncio.ensure_future(pool) t2 = asyncio.ensure_future(pool) await t1 with self.assertRaisesRegex( asyncpg.InterfaceError, r'pool is being initialized in another task'): await t2 await pool.close() async def test_pool_init_and_use_race(self): pool = self.create_pool(database='postgres', min_size=1, max_size=1) pool_task = asyncio.ensure_future(pool) await asyncio.sleep(0) with self.assertRaisesRegex( asyncpg.InterfaceError, r'being initialized, but not yet ready'): await pool.fetchval('SELECT 1') await pool_task await pool.close() async def test_pool_remote_close(self): pool = await self.create_pool(min_size=1, max_size=1) backend_pid_fut = self.loop.create_future() async def worker(): async with pool.acquire() as conn: pool_backend_pid = await conn.fetchval( 'SELECT pg_backend_pid()') backend_pid_fut.set_result(pool_backend_pid) await asyncio.sleep(0.2) task = self.loop.create_task(worker()) try: conn = await self.connect() backend_pid = await backend_pid_fut await conn.execute('SELECT pg_terminate_backend($1)', backend_pid) finally: await conn.close() await task # Check that connection_lost has released the pool holder. conn = await pool.acquire(timeout=0.1) await pool.release(conn)
class TestPool(tb.ConnectedTestCase): async def test_pool_01(self): pass async def worker(): pass async def test_pool_02(self): pass async def worker(): pass async def test_pool_03(self): pass async def test_pool_04(self): pass async def test_pool_05(self): pass async def worker(): pass async def test_pool_06(self): pass async def setup(con): pass async def test_pool_07(self): pass async def connect(*args, **kwargs): pass async def setup(con): pass async def init(con): pass async def reset(con): pass async def user(pool): pass async def bad_connect(*args, **kwargs): pass async def test_pool_08(self): pass async def test_pool_09(self): pass async def test_pool_10(self): pass async def test_pool_11(self): pass async def test_pool_12(self): pass async def test_pool_13(self): pass def test_pool_init_run_until_complete(self): pass async def test_pool_exception_in_setup_and_init(self): pass class Error(Exception): async def setup(con): pass async def test_pool_auth(self): pass async def worker(): pass async def test_pool_handles_task_cancel_in_acquire_with_timeout(self): pass async def worker(): pass async def test_pool_handles_task_cancel_in_release(self): pass async def worker(): pass async def test_pool_handles_query_cancel_in_release(self): pass async def worker(): pass async def test_pool_no_acquire_deadlock(self): pass async def sleep_and_release(): pass async def test_pool_config_persistence(self): pass class MyConnection(asyncpg.Connection): async def foo(self): pass async def fetchval(self, query): pass async def test(pool): pass async def test_pool_release_in_xact(self): '''Test that Connection.reset() closes any open transaction.''' pass async def get_xact_id(con): pass async def test_pool_connection_methods(self): pass async def test_fetch(pool): pass async def test_fetchrow(pool): pass async def test_fetchval(pool): pass async def test_execute(pool): pass async def test_execute_with_arg(pool): pass async def run(N, meth): pass async def test_pool_connection_execute_many(self): pass async def worker(): pass async def test_pool_max_inactive_time_01(self): pass async def test_pool_max_inactive_time_02(self): pass async def test_pool_max_inactive_time_03(self): pass async def test_pool_max_inactive_time_04(self): pass async def worker(): pass async def test_pool_max_inactive_time_05(self): pass async def test_pool_handles_inactive_connection_errors(self): pass async def test_pool_size_and_capacity(self): pass async def test_pool_closing(self): pass async def test_pool_handles_transaction_exit_in_asyncgen_1(self): pass class MyException(Exception): async def test_pool_handles_transaction_exit_in_asyncgen_2(self): pass class MyException(Exception): async def test_pool_handles_asyncgen_finalization(self): pass class MyException(Exception): async def test_pool_close_waits_for_release(self): pass async def worker(): pass async def test_pool_close_timeout(self): pass async def worker(): pass async def test_pool_expire_connections(self): pass async def test_pool_set_connection_args(self): pass async def test_pool_init_race(self): pass async def test_pool_init_and_use_race(self): pass async def test_pool_remote_close(self): pass async def worker(): pass
79
1
15
2
12
1
1
0.07
1
15
8
0
42
0
42
42
962
216
707
243
621
46
562
208
476
5
1
4
95
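A hedged sketch of the pool features TestPool covers: min/max sizing, the per-connection init hook (runs once per physical connection) versus the setup hook (runs on every acquire), and context-managed acquisition. Connection parameters are placeholders and a live server is assumed.

import asyncio
import asyncpg

async def init(con):
    # Runs once per new physical connection.
    await con.execute("SET TIME ZONE 'UTC'")

async def setup(con):
    # Runs on every acquire(); `con` is the pool's connection proxy.
    pass

async def main():
    pool = await asyncpg.create_pool(
        database='postgres', min_size=1, max_size=5,
        init=init, setup=setup)
    async with pool.acquire() as con:
        print(await con.fetchval('SELECT 1'))
    await pool.close()

asyncio.run(main())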
148,521
MagicStack/asyncpg
MagicStack_asyncpg/setup.py
setup.VersionMixin
class VersionMixin: def _fix_version(self, filename): # Replace asyncpg.__version__ with the actual version # of the distribution (possibly inferred from git). with open(str(filename)) as f: content = f.read() version_re = r"(.*__version__\s*=\s*)'[^']+'(.*)" repl = r"\1'{}'\2".format(self.distribution.metadata.version) content = re.sub(version_re, repl, content) with open(str(filename), 'w') as f: f.write(content)
class VersionMixin: def _fix_version(self, filename): pass
2
0
13
3
8
2
1
0.22
0
1
0
2
1
0
1
1
15
4
9
6
7
2
9
5
7
1
0
1
1
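VersionMixin._fix_version() above is a plain re.sub() with backreferences; a runnable reduction, with an inlined sample string in place of the real file contents:

import re

content = "__version__ = '0.0.0.dev0'\n"
version_re = r"(.*__version__\s*=\s*)'[^']+'(.*)"
# \1 and \2 re-insert the text around the old version literal.
repl = r"\1'{}'\2".format('0.30.0')
print(re.sub(version_re, repl, content), end='')
# __version__ = '0.30.0'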
148,522
MagicStack/asyncpg
MagicStack_asyncpg/setup.py
setup.build_ext
class build_ext(setuptools_build_ext.build_ext): user_options = setuptools_build_ext.build_ext.user_options + [ ('cython-always', None, 'run cythonize() even if .c files are present'), ('cython-annotate', None, 'Produce a colorized HTML version of the Cython source.'), ('cython-directives=', None, 'Cython compiler directives'), ] def initialize_options(self): # initialize_options() may be called multiple times on the # same command object, so make sure not to override previously # set options. if getattr(self, '_initialized', False): return super(build_ext, self).initialize_options() if os.environ.get('ASYNCPG_DEBUG'): self.cython_always = True self.cython_annotate = True self.cython_directives = "linetrace=True" self.define = 'PG_DEBUG,CYTHON_TRACE,CYTHON_TRACE_NOGIL' self.debug = True else: self.cython_always = False self.cython_annotate = None self.cython_directives = None def finalize_options(self): # finalize_options() may be called multiple times on the # same command object, so make sure not to override previously # set options. if getattr(self, '_initialized', False): return if not self.cython_always: self.cython_always = bool(os.environ.get( "ASYNCPG_BUILD_CYTHON_ALWAYS")) if self.cython_annotate is None: self.cython_annotate = os.environ.get( "ASYNCPG_BUILD_CYTHON_ANNOTATE") if self.cython_directives is None: self.cython_directives = os.environ.get( "ASYNCPG_BUILD_CYTHON_DIRECTIVES") need_cythonize = self.cython_always cfiles = {} for extension in self.distribution.ext_modules: for i, sfile in enumerate(extension.sources): if sfile.endswith('.pyx'): prefix, ext = os.path.splitext(sfile) cfile = prefix + '.c' if os.path.exists(cfile) and not self.cython_always: extension.sources[i] = cfile else: if os.path.exists(cfile): cfiles[cfile] = os.path.getmtime(cfile) else: cfiles[cfile] = 0 need_cythonize = True if need_cythonize: import pkg_resources # Double check Cython presence in case setup_requires # didn't go into effect (most likely because someone # imported Cython before setup_requires injected the # correct egg into sys.path.) try: import Cython except ImportError: raise RuntimeError( 'please install {} to compile asyncpg from source'.format( CYTHON_DEPENDENCY)) cython_dep = pkg_resources.Requirement.parse(CYTHON_DEPENDENCY) if Cython.__version__ not in cython_dep: raise RuntimeError( 'asyncpg requires {}, got Cython=={}'.format( CYTHON_DEPENDENCY, Cython.__version__ )) from Cython.Build import cythonize directives = { 'language_level': '3', } if self.cython_directives: for directive in self.cython_directives.split(','): k, _, v = directive.partition('=') if v.lower() == 'false': v = False elif v.lower() == 'true': v = True directives[k] = v self.distribution.ext_modules[:] = cythonize( self.distribution.ext_modules, compiler_directives=directives, annotate=self.cython_annotate) super(build_ext, self).finalize_options()
class build_ext(setuptools_build_ext.build_ext): def initialize_options(self): pass def finalize_options(self): pass
3
0
50
9
36
5
10
0.12
0
6
0
0
2
5
2
2
111
20
81
22
75
10
58
22
52
17
0
5
20
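The directive-string parsing in build_ext.finalize_options() reduces to a few lines. Note the elif: a second independent `if v.lower()` test would raise AttributeError once `v` has been coerced to the bool False. A runnable reduction:

def parse_directives(spec):
    # "k=v,k2=v2" -> dict, with 'true'/'false' coerced to booleans.
    directives = {'language_level': '3'}
    for directive in spec.split(','):
        k, _, v = directive.partition('=')
        if v.lower() == 'false':
            v = False
        elif v.lower() == 'true':
            v = True
        directives[k] = v
    return directives

print(parse_directives('linetrace=True,boundscheck=false'))
# {'language_level': '3', 'linetrace': True, 'boundscheck': False}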
148,523
MagicStack/asyncpg
MagicStack_asyncpg/setup.py
setup.build_py
class build_py(setuptools_build_py.build_py, VersionMixin): def build_module(self, module, module_file, package): outfile, copied = super().build_module(module, module_file, package) if module == '__init__' and package == 'asyncpg': self._fix_version(outfile) return outfile, copied
class build_py(setuptools_build_py.build_py, VersionMixin): def build_module(self, module, module_file, package): pass
2
0
7
2
5
0
2
0
2
1
0
0
1
0
1
72
9
3
6
3
4
0
6
3
4
2
3
1
2
148,524
MagicStack/asyncpg
MagicStack_asyncpg/setup.py
setup.sdist
class sdist(setuptools_sdist.sdist, VersionMixin): def make_release_tree(self, base_dir, files): super().make_release_tree(base_dir, files) self._fix_version(pathlib.Path(base_dir) / 'asyncpg' / '_version.py')
class sdist(setuptools_sdist.sdist, VersionMixin): def make_release_tree(self, base_dir, files): pass
2
0
3
0
3
0
1
0
2
2
0
0
1
0
1
71
5
1
4
2
2
0
4
2
2
1
3
0
1
148,525
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_introspection.py
tests.test_introspection.SlowIntrospectionConnection
class SlowIntrospectionConnection(apg_con.Connection): """Connection class to test introspection races.""" introspect_count = 0 async def _introspect_types(self, *args, **kwargs): self.introspect_count += 1 await asyncio.sleep(0.4) return await super()._introspect_types(*args, **kwargs)
class SlowIntrospectionConnection(apg_con.Connection): '''Connection class to test introspection races.''' async def _introspect_types(self, *args, **kwargs): pass
2
1
4
0
4
0
1
0.17
1
1
0
0
1
0
1
87
8
1
6
3
4
1
6
3
4
1
4
0
1
148,526
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_execute.py
tests.test_execute.TestExecuteScript
class TestExecuteScript(tb.ConnectedTestCase): async def test_execute_script_1(self): self.assertEqual(self.con._protocol.queries_count, 0) status = await self.con.execute(''' SELECT 1; SELECT true FROM pg_type WHERE false = true; SELECT generate_series(0, 9); ''') self.assertEqual(self.con._protocol.queries_count, 1) self.assertEqual(status, 'SELECT 10') async def test_execute_script_2(self): status = await self.con.execute(''' CREATE TABLE mytab (a int); ''') self.assertEqual(status, 'CREATE TABLE') try: status = await self.con.execute(''' INSERT INTO mytab (a) VALUES ($1), ($2) ''', 10, 20) self.assertEqual(status, 'INSERT 0 2') finally: await self.con.execute('DROP TABLE mytab') async def test_execute_script_3(self): with self.assertRaisesRegex(asyncpg.PostgresSyntaxError, 'cannot insert multiple commands'): await self.con.execute(''' CREATE TABLE mytab (a int); INSERT INTO mytab (a) VALUES ($1), ($2); ''', 10, 20) async def test_execute_script_check_transactionality(self): with self.assertRaises(asyncpg.PostgresError): await self.con.execute(''' CREATE TABLE mytab (a int); SELECT * FROM mytab WHERE 1 / 0 = 1; ''') with self.assertRaisesRegex(asyncpg.PostgresError, '"mytab" does not exist'): await self.con.prepare(''' SELECT * FROM mytab ''') async def test_execute_exceptions_1(self): with self.assertRaisesRegex(asyncpg.PostgresError, 'relation "__dne__" does not exist'): await self.con.execute('select * from __dne__') async def test_execute_script_interrupted_close(self): fut = self.loop.create_task( self.con.execute('''SELECT pg_sleep(10)''')) await asyncio.sleep(0.2) self.assertFalse(self.con.is_closed()) await self.con.close() self.assertTrue(self.con.is_closed()) with self.assertRaises(asyncpg.QueryCanceledError): await fut async def test_execute_script_interrupted_terminate(self): fut = self.loop.create_task( self.con.execute('''SELECT pg_sleep(10)''')) await asyncio.sleep(0.2) self.assertFalse(self.con.is_closed()) self.con.terminate() self.assertTrue(self.con.is_closed()) with self.assertRaisesRegex(asyncpg.ConnectionDoesNotExistError, 'closed in the middle'): await fut self.con.terminate()
class TestExecuteScript(tb.ConnectedTestCase): async def test_execute_script_1(self): pass async def test_execute_script_2(self): pass async def test_execute_script_3(self): pass async def test_execute_script_check_transactionality(self): pass async def test_execute_exceptions_1(self): pass async def test_execute_script_interrupted_close(self): pass async def test_execute_script_interrupted_terminate(self): pass
8
0
11
2
9
0
1
0
1
0
0
0
7
0
7
7
85
21
64
12
56
0
41
12
33
1
1
1
7
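A hedged sketch of the execute() semantics TestExecuteScript pins down: a multi-statement script is accepted only when no arguments are passed; a parameterized query must be a single statement. The DSN is a placeholder and a live server is assumed.

import asyncio
import asyncpg

async def main():
    con = await asyncpg.connect('postgresql://localhost/postgres')
    # Several commands, no arguments: executed as one script;
    # the status of the last command is returned.
    status = await con.execute(
        'CREATE TEMP TABLE tscript (a int); INSERT INTO tscript VALUES (1);')
    print(status)   # INSERT 0 1
    # One command with arguments: uses a server-side prepared statement.
    print(await con.execute(
        'INSERT INTO tscript VALUES ($1), ($2)', 2, 3))  # INSERT 0 2
    try:
        # Arguments plus multiple commands is rejected by the server.
        await con.execute('SELECT $1::int; SELECT $2::int;', 1, 2)
    except asyncpg.PostgresSyntaxError as e:
        print('rejected:', e)
    await con.close()

asyncio.run(main())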
148,527
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_execute.py
tests.test_execute.TestExecuteMany
class TestExecuteMany(tb.ConnectedTestCase):
    def setUp(self):
        super().setUp()
        self.loop.run_until_complete(self.con.execute(
            'CREATE TABLE exmany (a text, b int PRIMARY KEY)'))

    def tearDown(self):
        self.loop.run_until_complete(self.con.execute('DROP TABLE exmany'))
        super().tearDown()

    async def test_executemany_basic(self):
        result = await self.con.executemany('''
            INSERT INTO exmany VALUES($1, $2)
        ''', [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])
        self.assertIsNone(result)
        result = await self.con.fetch('''
            SELECT * FROM exmany
        ''')
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])

        # Empty set
        await self.con.executemany('''
            INSERT INTO exmany VALUES($1, $2)
        ''', ())
        result = await self.con.fetch('''
            SELECT * FROM exmany
        ''')
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])

    async def test_executemany_returning(self):
        result = await self.con.fetchmany('''
            INSERT INTO exmany VALUES($1, $2) RETURNING a, b
        ''', [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])
        result = await self.con.fetch('''
            SELECT * FROM exmany
        ''')
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])

        # Empty set
        await self.con.fetchmany('''
            INSERT INTO exmany VALUES($1, $2) RETURNING a, b
        ''', ())
        result = await self.con.fetch('''
            SELECT * FROM exmany
        ''')
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])

        # Without "RETURNING"
        result = await self.con.fetchmany('''
            INSERT INTO exmany VALUES($1, $2)
        ''', [('e', 5), ('f', 6)])
        self.assertEqual(result, [])
        result = await self.con.fetch('''
            SELECT * FROM exmany
        ''')
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6)
        ])

    async def test_executemany_bad_input(self):
        with self.assertRaisesRegex(
            exceptions.DataError,
            r"invalid input in executemany\(\) argument sequence element #1: "
            r"expected a sequence",
        ):
            await self.con.executemany('''
                INSERT INTO exmany (b) VALUES($1)
            ''', [(0,), {1: 0}])

        with self.assertRaisesRegex(
            exceptions.DataError,
            r"invalid input for query argument \$1 in element #1 of "
            r"executemany\(\) sequence: 'bad'",
        ):
            await self.con.executemany('''
                INSERT INTO exmany (b) VALUES($1)
            ''', [(0,), ("bad",)])

    async def test_executemany_error_in_input_gen(self):
        bad_data = ([1 / 0] for v in range(10))

        with self.assertRaises(ZeroDivisionError):
            async with self.con.transaction():
                await self.con.executemany('''
                    INSERT INTO exmany (b)VALUES($1)
                ''', bad_data)

        good_data = ([v] for v in range(10))
        async with self.con.transaction():
            await self.con.executemany('''
                INSERT INTO exmany (b)VALUES($1)
            ''', good_data)

    async def test_executemany_server_failure(self):
        with self.assertRaises(exceptions.UniqueViolationError):
            await self.con.executemany('''
                INSERT INTO exmany VALUES($1, $2)
            ''', [
                ('a', 1), ('b', 2), ('c', 2), ('d', 4)
            ])
        result = await self.con.fetch('SELECT * FROM exmany')
        self.assertEqual(result, [])

    async def test_executemany_server_failure_after_writes(self):
        with self.assertRaises(exceptions.UniqueViolationError):
            await self.con.executemany('''
                INSERT INTO exmany VALUES($1, $2)
            ''', [('a' * 32768, x) for x in range(10)] + [
                ('b', 12), ('c', 12), ('d', 14)
            ])
        result = await self.con.fetch('SELECT b FROM exmany')
        self.assertEqual(result, [])

    async def test_executemany_server_failure_during_writes(self):
        # failure at the beginning, server error detected in the middle
        pos = 0

        def gen():
            nonlocal pos
            while pos < 128:
                pos += 1
                if pos < 3:
                    yield ('a', 0)
                else:
                    yield 'a' * 32768, pos

        with self.assertRaises(exceptions.UniqueViolationError):
            await self.con.executemany('''
                INSERT INTO exmany VALUES($1, $2)
            ''', gen())
        result = await self.con.fetch('SELECT b FROM exmany')
        self.assertEqual(result, [])
        self.assertLess(pos, 128, 'should stop early')

    async def test_executemany_client_failure_after_writes(self):
        with self.assertRaises(ZeroDivisionError):
            await self.con.executemany('''
                INSERT INTO exmany VALUES($1, $2)
            ''', (('a' * 32768, y + y / y) for y in range(10, -1, -1)))
        result = await self.con.fetch('SELECT b FROM exmany')
        self.assertEqual(result, [])

    async def test_executemany_timeout(self):
        with self.assertRaises(asyncio.TimeoutError):
            await self.con.executemany('''
                INSERT INTO exmany VALUES(pg_sleep(0.1) || $1, $2)
            ''', [('a' * 32768, x) for x in range(128)], timeout=0.5)
        result = await self.con.fetch('SELECT * FROM exmany')
        self.assertEqual(result, [])

    async def test_executemany_timeout_flow_control(self):
        event = asyncio.Event()

        async def locker():
            test_func = getattr(self, self._testMethodName).__func__
            opts = getattr(test_func, '__connect_options__', {})
            conn = await self.connect(**opts)
            try:
                tx = conn.transaction()
                await tx.start()
                await conn.execute("UPDATE exmany SET a = '1' WHERE b = 10")
                event.set()
                await asyncio.sleep(1)
                await tx.rollback()
            finally:
                event.set()
                await conn.close()

        await self.con.executemany('''
            INSERT INTO exmany VALUES(NULL, $1)
        ''', [(x,) for x in range(128)])
        fut = asyncio.ensure_future(locker())
        await event.wait()
        with self.assertRaises(asyncio.TimeoutError):
            await self.con.executemany('''
                UPDATE exmany SET a = $1 WHERE b = $2
            ''', [('a' * 32768, x) for x in range(128)], timeout=0.5)
        await fut
        result = await self.con.fetch(
            'SELECT * FROM exmany WHERE a IS NOT NULL')
        self.assertEqual(result, [])

    async def test_executemany_client_failure_in_transaction(self):
        tx = self.con.transaction()
        await tx.start()
        with self.assertRaises(ZeroDivisionError):
            await self.con.executemany('''
                INSERT INTO exmany VALUES($1, $2)
            ''', (('a' * 32768, y + y / y) for y in range(10, -1, -1)))
        result = await self.con.fetch('SELECT b FROM exmany')
        # only 2 batches executed (2 x 4)
        self.assertEqual(
            [x[0] for x in result], [y + 1 for y in range(10, 2, -1)])
        await tx.rollback()
        result = await self.con.fetch('SELECT b FROM exmany')
        self.assertEqual(result, [])

    async def test_executemany_client_server_failure_conflict(self):
        self.con._transport.set_write_buffer_limits(65536 * 64, 16384 * 64)
        with self.assertRaises(exceptions.UniqueViolationError):
            await self.con.executemany('''
                INSERT INTO exmany VALUES($1, 0)
            ''', (('a' * 32768,) for y in range(4, -1, -1) if y / y))
        result = await self.con.fetch('SELECT b FROM exmany')
        self.assertEqual(result, [])

    async def test_executemany_prepare(self):
        stmt = await self.con.prepare('''
            INSERT INTO exmany VALUES($1, $2)
        ''')
        result = await stmt.executemany([
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])
        self.assertIsNone(result)
        result = await self.con.fetch('''
            SELECT * FROM exmany
        ''')
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])

        # Empty set
        await stmt.executemany(())
        result = await self.con.fetch('''
            SELECT * FROM exmany
        ''')
        self.assertEqual(result, [
            ('a', 1), ('b', 2), ('c', 3), ('d', 4)
        ])
class TestExecuteMany(tb.ConnectedTestCase): def setUp(self): pass def tearDown(self): pass async def test_executemany_basic(self): pass async def test_executemany_returning(self): pass async def test_executemany_bad_input(self): pass async def test_executemany_error_in_input_gen(self): pass async def test_executemany_server_failure(self): pass async def test_executemany_server_failure_after_writes(self): pass async def test_executemany_server_failure_during_writes(self): pass def gen(): pass async def test_executemany_client_failure_after_writes(self): pass async def test_executemany_timeout(self): pass async def test_executemany_timeout_flow_control(self): pass async def locker(): pass async def test_executemany_client_failure_in_transaction(self): pass async def test_executemany_client_server_failure_conflict(self): pass async def test_executemany_prepare(self): pass
18
0
15
1
14
0
1
0.04
1
6
2
0
15
0
15
15
248
29
213
42
194
8
122
41
103
3
1
2
19
148,528
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_exceptions.py
tests.test_exceptions.TestExceptions
class TestExceptions(tb.ConnectedTestCase):
    def test_exceptions_exported(self):
        for err in ('PostgresError', 'SubstringError', 'InterfaceError'):
            self.assertTrue(hasattr(asyncpg, err))
            self.assertIn(err, asyncpg.__all__)

        for err in ('PostgresMessage',):
            self.assertFalse(hasattr(asyncpg, err))
            self.assertNotIn(err, asyncpg.__all__)

        self.assertIsNone(asyncpg.PostgresError.schema_name)

    async def test_exceptions_unpacking(self):
        try:
            await self.con.execute('SELECT * FROM _nonexistent_')
        except asyncpg.UndefinedTableError as e:
            self.assertEqual(e.sqlstate, '42P01')
            self.assertEqual(e.position, '15')
            self.assertEqual(e.query, 'SELECT * FROM _nonexistent_')
            self.assertIsNotNone(e.severity)
        else:
            self.fail('UndefinedTableError not raised')

    async def test_exceptions_str(self):
        try:
            await self.con.execute('''
                CREATE FUNCTION foo() RETURNS bool AS $$ $$ LANGUAGE SQL;
            ''')
        except asyncpg.InvalidFunctionDefinitionError as e:
            if self.server_version < (17, 0):
                detail = (
                    "Function's final statement must be SELECT or "
                    "INSERT/UPDATE/DELETE RETURNING."
                )
            else:
                detail = (
                    "Function's final statement must be SELECT or "
                    "INSERT/UPDATE/DELETE/MERGE RETURNING."
                )
            self.assertEqual(e.detail, detail)
            self.assertIn('DETAIL: Function', str(e))
        else:
            self.fail('InvalidFunctionDefinitionError not raised')
class TestExceptions(tb.ConnectedTestCase):
    def test_exceptions_exported(self):
        pass
    async def test_exceptions_unpacking(self):
        pass
    async def test_exceptions_str(self):
        pass
4
0
14
1
13
0
3
0
1
1
0
0
3
0
3
3
45
6
39
8
35
0
30
6
26
3
1
2
8
148,529
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_cursor.py
tests.test_cursor.TestIterableCursor
class TestIterableCursor(tb.ConnectedTestCase):
    async def test_cursor_iterable_01(self):
        st = await self.con.prepare('SELECT generate_series(0, 20)')
        expected = await st.fetch()

        for prefetch in range(1, 25):
            with self.subTest(prefetch=prefetch):
                async with self.con.transaction():
                    result = []
                    async for rec in st.cursor(prefetch=prefetch):
                        result.append(rec)

                self.assertEqual(
                    result, expected,
                    'result != expected for prefetch={}'.format(prefetch))

    async def test_cursor_iterable_02(self):
        # Test that it's not possible to create a cursor without hold
        # outside of a transaction
        s = await self.con.prepare(
            'DECLARE t BINARY CURSOR WITHOUT HOLD FOR SELECT 1')
        with self.assertRaises(asyncpg.NoActiveSQLTransactionError):
            await s.fetch()

        # Now test that statement.cursor() does not let you
        # iterate over it outside of a transaction
        st = await self.con.prepare('SELECT generate_series(0, 20)')

        it = st.cursor(prefetch=5).__aiter__()
        if inspect.isawaitable(it):
            it = await it

        with self.assertRaisesRegex(asyncpg.NoActiveSQLTransactionError,
                                    'cursor cannot be created.*transaction'):
            await it.__anext__()

    async def test_cursor_iterable_03(self):
        st = await self.con.prepare('SELECT generate_series(0, 20)')

        it = st.cursor().__aiter__()
        if inspect.isawaitable(it):
            it = await it

        st._state.mark_closed()

        with self.assertRaisesRegex(asyncpg.InterfaceError,
                                    'statement is closed'):
            async for _ in it:  # NOQA
                pass

    async def test_cursor_iterable_04(self):
        st = await self.con.prepare('SELECT generate_series(0, 20)')
        st._state.mark_closed()

        with self.assertRaisesRegex(asyncpg.InterfaceError,
                                    'statement is closed'):
            async for _ in st.cursor():  # NOQA
                pass

    async def test_cursor_iterable_05(self):
        st = await self.con.prepare('SELECT generate_series(0, 20)')
        for prefetch in range(-1, 1):
            with self.subTest(prefetch=prefetch):
                with self.assertRaisesRegex(asyncpg.InterfaceError,
                                            'must be greater than zero'):
                    async for _ in st.cursor(prefetch=prefetch):  # NOQA
                        pass

    async def test_cursor_iterable_06(self):
        recs = []

        async with self.con.transaction():
            await self.con.execute('''
                CREATE TABLE cursor_iterable_06 (id int);
                INSERT INTO cursor_iterable_06 VALUES (0), (1);
            ''')
            try:
                cur = self.con.cursor('SELECT * FROM cursor_iterable_06')
                async for rec in cur:
                    recs.append(rec)
            finally:
                # Check that after iteration has exhausted the cursor,
                # its associated portal is closed properly, unlocking
                # the table.
                await self.con.execute('DROP TABLE cursor_iterable_06')

        self.assertEqual(recs, [(i,) for i in range(2)])
class TestIterableCursor(tb.ConnectedTestCase): async def test_cursor_iterable_01(self): pass async def test_cursor_iterable_02(self): pass async def test_cursor_iterable_03(self): pass async def test_cursor_iterable_04(self): pass async def test_cursor_iterable_05(self): pass async def test_cursor_iterable_06(self): pass
7
0
14
2
11
2
3
0.16
1
1
0
0
6
0
6
6
88
17
64
26
57
10
53
26
46
3
1
4
15
148,530
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_cursor.py
tests.test_cursor.TestCursor
class TestCursor(tb.ConnectedTestCase):
    async def test_cursor_01(self):
        st = await self.con.prepare('SELECT generate_series(0, 20)')
        with self.assertRaisesRegex(asyncpg.NoActiveSQLTransactionError,
                                    'cursor cannot be created.*transaction'):
            await st.cursor()

    async def test_cursor_02(self):
        st = await self.con.prepare('SELECT generate_series(0, 20)')

        async with self.con.transaction():
            cur = await st.cursor()

            for i in range(-1, 1):
                with self.assertRaisesRegex(asyncpg.InterfaceError,
                                            'greater than zero'):
                    await cur.fetch(i)

            res = await cur.fetch(2)
            self.assertEqual(res, [(0,), (1,)])

            rec = await cur.fetchrow()
            self.assertEqual(rec, (2,))

            r = repr(cur)
            self.assertTrue(r.startswith('<asyncpg.Cursor '))
            self.assertNotIn(' exhausted ', r)
            self.assertIn('"SELECT generate', r)

            moved = await cur.forward(5)
            self.assertEqual(moved, 5)

            rec = await cur.fetchrow()
            self.assertEqual(rec, (8,))

            res = await cur.fetch(100)
            self.assertEqual(res, [(i,) for i in range(9, 21)])

            self.assertIsNone(await cur.fetchrow())
            self.assertEqual(await cur.fetch(5), [])

            r = repr(cur)
            self.assertTrue(r.startswith('<asyncpg.Cursor '))
            self.assertIn(' exhausted ', r)
            self.assertIn('"SELECT generate', r)

    async def test_cursor_03(self):
        st = await self.con.prepare('SELECT generate_series(0, 20)')

        async with self.con.transaction():
            with self.assertRaisesRegex(asyncpg.InterfaceError,
                                        'prefetch argument can only'):
                await st.cursor(prefetch=10)

    async def test_cursor_04(self):
        async with self.con.transaction():
            st = await self.con.cursor('SELECT generate_series(0, 100)')
            await st.forward(42)
            self.assertEqual(await st.fetchrow(), (42,))
class TestCursor(tb.ConnectedTestCase): async def test_cursor_01(self): pass async def test_cursor_02(self): pass async def test_cursor_03(self): pass async def test_cursor_04(self): pass
5
0
13
2
11
0
1
0
1
1
0
0
4
0
4
4
58
13
45
15
40
0
42
15
37
2
1
3
5
148,531
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_copy.py
tests.test_copy.TestCopyTo
class TestCopyTo(tb.ConnectedTestCase): async def test_copy_to_table_basics(self): await self.con.execute(''' CREATE TABLE copytab(a text, "b~" text, i int); ''') try: f = io.BytesIO() f.write( '\n'.join([ 'a1\tb1\t1', 'a2\tb2\t2', 'a3\tb3\t3', 'a4\tb4\t4', 'a5\tb5\t5', '*\t\\N\t\\N', '' ]).encode('utf-8') ) f.seek(0) res = await self.con.copy_to_table('copytab', source=f) self.assertEqual(res, 'COPY 6') output = await self.con.fetch(""" SELECT * FROM copytab ORDER BY a """) self.assertEqual( output, [ ('*', None, None), ('a1', 'b1', 1), ('a2', 'b2', 2), ('a3', 'b3', 3), ('a4', 'b4', 4), ('a5', 'b5', 5), ] ) # Test parameters. await self.con.execute('TRUNCATE copytab') await self.con.execute('SET search_path=none') f.seek(0) f.truncate() f.write( '\n'.join([ 'a|b~', '*a1*|b1', '*a2*|b2', '*a3*|b3', '*a4*|b4', '*a5*|b5', '*!**|*n-u-l-l*', 'n-u-l-l|bb', ]).encode('utf-8') ) f.seek(0) if self.con.get_server_version() < (9, 4): force_null = None forced_null_expected = 'n-u-l-l' else: force_null = ('b~',) forced_null_expected = None res = await self.con.copy_to_table( 'copytab', source=f, columns=('a', 'b~'), schema_name='public', format='csv', delimiter='|', null='n-u-l-l', header=True, quote='*', escape='!', force_not_null=('a',), force_null=force_null) self.assertEqual(res, 'COPY 7') await self.con.execute('SET search_path=public') output = await self.con.fetch(""" SELECT * FROM copytab ORDER BY a """) self.assertEqual( output, [ ('*', forced_null_expected, None), ('a1', 'b1', None), ('a2', 'b2', None), ('a3', 'b3', None), ('a4', 'b4', None), ('a5', 'b5', None), ('n-u-l-l', 'bb', None), ] ) finally: await self.con.execute('DROP TABLE public.copytab') async def test_copy_to_table_large_rows(self): await self.con.execute(''' CREATE TABLE copytab(a text, b text); ''') try: class _Source: def __init__(self): self.rowcount = 0 def __aiter__(self): return self async def __anext__(self): if self.rowcount >= 100: raise StopAsyncIteration else: self.rowcount += 1 return b'a1' * 500000 + b'\t' + b'b1' * 500000 + b'\n' res = await self.con.copy_to_table('copytab', source=_Source()) self.assertEqual(res, 'COPY 100') finally: await self.con.execute('DROP TABLE copytab') async def test_copy_to_table_from_bytes_like(self): await self.con.execute(''' CREATE TABLE copytab(a text, b text); ''') try: data = memoryview((b'a1' * 500 + b'\t' + b'b1' * 500 + b'\n') * 2) res = await self.con.copy_to_table('copytab', source=data) self.assertEqual(res, 'COPY 2') finally: await self.con.execute('DROP TABLE copytab') async def test_copy_to_table_fail_in_source_1(self): await self.con.execute(''' CREATE TABLE copytab(a text, b text); ''') try: class _Source: def __init__(self): self.rowcount = 0 def __aiter__(self): return self async def __anext__(self): raise RuntimeError('failure in source') with self.assertRaisesRegex(RuntimeError, 'failure in source'): await self.con.copy_to_table('copytab', source=_Source()) # Check that the protocol has recovered. 
self.assertEqual(await self.con.fetchval('SELECT 1'), 1) finally: await self.con.execute('DROP TABLE copytab') async def test_copy_to_table_fail_in_source_2(self): await self.con.execute(''' CREATE TABLE copytab(a text, b text); ''') try: class _Source: def __init__(self): self.rowcount = 0 def __aiter__(self): return self async def __anext__(self): if self.rowcount == 0: self.rowcount += 1 return b'a\tb\n' else: raise RuntimeError('failure in source') with self.assertRaisesRegex(RuntimeError, 'failure in source'): await self.con.copy_to_table('copytab', source=_Source()) # Check that the protocol has recovered. self.assertEqual(await self.con.fetchval('SELECT 1'), 1) finally: await self.con.execute('DROP TABLE copytab') async def test_copy_to_table_timeout(self): await self.con.execute(''' CREATE TABLE copytab(a text, b text); ''') try: class _Source: def __init__(self, loop): self.rowcount = 0 self.loop = loop def __aiter__(self): return self async def __anext__(self): self.rowcount += 1 await asyncio.sleep(60) return b'a1' * 50 + b'\t' + b'b1' * 50 + b'\n' with self.assertRaises(asyncio.TimeoutError): await self.con.copy_to_table( 'copytab', source=_Source(self.loop), timeout=0.10) # Check that the protocol has recovered. self.assertEqual(await self.con.fetchval('SELECT 1'), 1) finally: await self.con.execute('DROP TABLE copytab') async def test_copy_to_table_from_file_path(self): await self.con.execute(''' CREATE TABLE copytab(a text, "b~" text, i int); ''') f = tempfile.NamedTemporaryFile(delete=False) try: f.write( '\n'.join([ 'a1\tb1\t1', 'a2\tb2\t2', 'a3\tb3\t3', 'a4\tb4\t4', 'a5\tb5\t5', '*\t\\N\t\\N', '' ]).encode('utf-8') ) f.close() res = await self.con.copy_to_table('copytab', source=f.name) self.assertEqual(res, 'COPY 6') output = await self.con.fetch(""" SELECT * FROM copytab ORDER BY a """) self.assertEqual( output, [ ('*', None, None), ('a1', 'b1', 1), ('a2', 'b2', 2), ('a3', 'b3', 3), ('a4', 'b4', 4), ('a5', 'b5', 5), ] ) finally: await self.con.execute('DROP TABLE public.copytab') os.unlink(f.name) async def test_copy_records_to_table_1(self): await self.con.execute(''' CREATE TABLE copytab(a text, b int, c timestamptz); ''') try: date = datetime.datetime.now(tz=datetime.timezone.utc) delta = datetime.timedelta(days=1) records = [ ('a-{}'.format(i), i, date + delta) for i in range(100) ] records.append(('a-100', None, None)) res = await self.con.copy_records_to_table( 'copytab', records=records) self.assertEqual(res, 'COPY 101') finally: await self.con.execute('DROP TABLE copytab') async def test_copy_records_to_table_where(self): if not self.con._server_caps.sql_copy_from_where: raise unittest.SkipTest( 'COPY WHERE not supported on server') await self.con.execute(''' CREATE TABLE copytab_where(a text, b int, c timestamptz); ''') try: date = datetime.datetime.now(tz=datetime.timezone.utc) delta = datetime.timedelta(days=1) records = [ ('a-{}'.format(i), i, date + delta) for i in range(100) ] records.append(('a-100', None, None)) records.append(('b-999', None, None)) res = await self.con.copy_records_to_table( 'copytab_where', records=records, where='a <> \'b-999\'') self.assertEqual(res, 'COPY 101') finally: await self.con.execute('DROP TABLE copytab_where') async def test_copy_records_to_table_async(self): await self.con.execute(''' CREATE TABLE copytab_async(a text, b int, c timestamptz); ''') try: date = datetime.datetime.now(tz=datetime.timezone.utc) delta = datetime.timedelta(days=1) async def record_generator(): for i in range(100): yield ('a-{}'.format(i), i, date 
+ delta) yield ('a-100', None, None) res = await self.con.copy_records_to_table( 'copytab_async', records=record_generator(), ) self.assertEqual(res, 'COPY 101') finally: await self.con.execute('DROP TABLE copytab_async') async def test_copy_records_to_table_no_binary_codec(self): await self.con.execute(''' CREATE TABLE copytab(a uuid); ''') try: def _encoder(value): return value def _decoder(value): return value await self.con.set_type_codec( 'uuid', encoder=_encoder, decoder=_decoder, schema='pg_catalog', format='text' ) records = [('2975ab9a-f79c-4ab4-9be5-7bc134d952f0',)] with self.assertRaisesRegex( asyncpg.InternalClientError, 'no binary format encoder'): await self.con.copy_records_to_table( 'copytab', records=records) finally: await self.con.reset_type_codec( 'uuid', schema='pg_catalog' ) await self.con.execute('DROP TABLE copytab')
class TestCopyTo(tb.ConnectedTestCase): async def test_copy_to_table_basics(self): pass async def test_copy_to_table_large_rows(self): pass class _Source: def __init__(self): pass def __aiter__(self): pass async def __anext__(self): pass async def test_copy_to_table_from_bytes_like(self): pass async def test_copy_to_table_fail_in_source_1(self): pass class _Source: def __init__(self): pass def __aiter__(self): pass async def __anext__(self): pass async def test_copy_to_table_fail_in_source_2(self): pass class _Source: def __init__(self): pass def __aiter__(self): pass async def __anext__(self): pass async def test_copy_to_table_timeout(self): pass class _Source: def __init__(self): pass def __aiter__(self): pass async def __anext__(self): pass async def test_copy_to_table_from_file_path(self): pass async def test_copy_records_to_table_1(self): pass async def test_copy_records_to_table_where(self): pass async def test_copy_records_to_table_async(self): pass async def record_generator(): pass async def test_copy_records_to_table_no_binary_codec(self): pass def _encoder(value): pass def _decoder(value): pass
31
0
15
3
13
0
1
0.01
1
12
4
0
11
1
11
11
368
77
287
61
256
4
159
60
128
2
1
2
31
148,532
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_copy.py
tests.test_copy.TestCopyFrom
class TestCopyFrom(tb.ConnectedTestCase): async def test_copy_from_table_basics(self): await self.con.execute(''' CREATE TABLE copytab(a text, "b~" text, i int); INSERT INTO copytab (a, "b~", i) ( SELECT 'a' || i::text, 'b' || i::text, i FROM generate_series(1, 5) AS i ); INSERT INTO copytab (a, "b~", i) VALUES('*', NULL, NULL); ''') try: f = io.BytesIO() # Basic functionality. res = await self.con.copy_from_table('copytab', output=f) self.assertEqual(res, 'COPY 6') output = f.getvalue().decode().split('\n') self.assertEqual( output, [ 'a1\tb1\t1', 'a2\tb2\t2', 'a3\tb3\t3', 'a4\tb4\t4', 'a5\tb5\t5', '*\t\\N\t\\N', '' ] ) # Test parameters. await self.con.execute('SET search_path=none') f.seek(0) f.truncate() res = await self.con.copy_from_table( 'copytab', output=f, columns=('a', 'b~'), schema_name='public', format='csv', delimiter='|', null='n-u-l-l', header=True, quote='*', escape='!', force_quote=('a',)) output = f.getvalue().decode().split('\n') self.assertEqual( output, [ 'a|b~', '*a1*|b1', '*a2*|b2', '*a3*|b3', '*a4*|b4', '*a5*|b5', '*!**|n-u-l-l', '' ] ) await self.con.execute('SET search_path=public') finally: await self.con.execute('DROP TABLE public.copytab') async def test_copy_from_table_large_rows(self): await self.con.execute(''' CREATE TABLE copytab(a text, b text); INSERT INTO copytab (a, b) ( SELECT repeat('a' || i::text, 500000), repeat('b' || i::text, 500000) FROM generate_series(1, 5) AS i ); ''') try: f = io.BytesIO() # Basic functionality. res = await self.con.copy_from_table('copytab', output=f) self.assertEqual(res, 'COPY 5') output = f.getvalue().decode().split('\n') self.assertEqual( output, [ 'a1' * 500000 + '\t' + 'b1' * 500000, 'a2' * 500000 + '\t' + 'b2' * 500000, 'a3' * 500000 + '\t' + 'b3' * 500000, 'a4' * 500000 + '\t' + 'b4' * 500000, 'a5' * 500000 + '\t' + 'b5' * 500000, '' ] ) finally: await self.con.execute('DROP TABLE public.copytab') async def test_copy_from_query_basics(self): f = io.BytesIO() res = await self.con.copy_from_query(''' SELECT repeat('a' || i::text, 500000), repeat('b' || i::text, 500000) FROM generate_series(1, 5) AS i ''', output=f) self.assertEqual(res, 'COPY 5') output = f.getvalue().decode().split('\n') self.assertEqual( output, [ 'a1' * 500000 + '\t' + 'b1' * 500000, 'a2' * 500000 + '\t' + 'b2' * 500000, 'a3' * 500000 + '\t' + 'b3' * 500000, 'a4' * 500000 + '\t' + 'b4' * 500000, 'a5' * 500000 + '\t' + 'b5' * 500000, '' ] ) async def test_copy_from_query_with_args(self): f = io.BytesIO() res = await self.con.copy_from_query(''' SELECT i, i * 10, $2::text FROM generate_series(1, 5) AS i WHERE i = $1 ''', 3, None, output=f) self.assertEqual(res, 'COPY 1') output = f.getvalue().decode().split('\n') self.assertEqual( output, [ '3\t30\t\\N', '' ] ) async def test_copy_from_query_to_path(self): with tempfile.NamedTemporaryFile() as f: f.close() await self.con.copy_from_query(''' SELECT i, i * 10 FROM generate_series(1, 5) AS i WHERE i = $1 ''', 3, output=f.name) with open(f.name, 'rb') as fr: output = fr.read().decode().split('\n') self.assertEqual( output, [ '3\t30', '' ] ) async def test_copy_from_query_to_path_like(self): with tempfile.NamedTemporaryFile() as f: f.close() class Path: def __init__(self, path): self.path = path def __fspath__(self): return self.path await self.con.copy_from_query(''' SELECT i, i * 10 FROM generate_series(1, 5) AS i WHERE i = $1 ''', 3, output=Path(f.name)) with open(f.name, 'rb') as fr: output = fr.read().decode().split('\n') self.assertEqual( output, [ '3\t30', '' ] ) async def 
test_copy_from_query_to_bad_output(self): with self.assertRaisesRegex(TypeError, 'output is expected to be'): await self.con.copy_from_query(''' SELECT i, i * 10 FROM generate_series(1, 5) AS i WHERE i = $1 ''', 3, output=1) async def test_copy_from_query_to_sink(self): with tempfile.NamedTemporaryFile() as f: async def writer(data): # Sleeping here to simulate slow output sink to test # backpressure. await asyncio.sleep(0.05) f.write(data) await self.con.copy_from_query(''' SELECT repeat('a', 500) FROM generate_series(1, 5000) AS i ''', output=writer) f.seek(0) output = f.read().decode().split('\n') self.assertEqual( output, [ 'a' * 500 ] * 5000 + [''] ) self.assertEqual(await self.con.fetchval('SELECT 1'), 1) async def test_copy_from_query_cancellation_explicit(self): async def writer(data): # Sleeping here to simulate slow output sink to test # backpressure. await asyncio.sleep(0.5) coro = self.con.copy_from_query(''' SELECT repeat('a', 500) FROM generate_series(1, 5000) AS i ''', output=writer) task = self.loop.create_task(coro) await asyncio.sleep(0.7) task.cancel() with self.assertRaises(asyncio.CancelledError): await task self.assertEqual(await self.con.fetchval('SELECT 1'), 1) async def test_copy_from_query_cancellation_on_sink_error(self): async def writer(data): await asyncio.sleep(0.05) raise RuntimeError('failure') coro = self.con.copy_from_query(''' SELECT repeat('a', 500) FROM generate_series(1, 5000) AS i ''', output=writer) task = self.loop.create_task(coro) with self.assertRaises(RuntimeError): await task self.assertEqual(await self.con.fetchval('SELECT 1'), 1) async def test_copy_from_query_cancellation_while_waiting_for_data(self): async def writer(data): pass coro = self.con.copy_from_query(''' SELECT pg_sleep(60) FROM generate_series(1, 5000) AS i ''', output=writer) task = self.loop.create_task(coro) await asyncio.sleep(0.7) task.cancel() with self.assertRaises(asyncio.CancelledError): await task self.assertEqual(await self.con.fetchval('SELECT 1'), 1) async def test_copy_from_query_timeout_1(self): async def writer(data): await asyncio.sleep(0.05) coro = self.con.copy_from_query(''' SELECT repeat('a', 500) FROM generate_series(1, 5000) AS i ''', output=writer, timeout=0.10) task = self.loop.create_task(coro) with self.assertRaises(asyncio.TimeoutError): await task self.assertEqual(await self.con.fetchval('SELECT 1'), 1) async def test_copy_from_query_timeout_2(self): async def writer(data): try: await asyncio.sleep(10) except asyncio.TimeoutError: raise else: self.fail('TimeoutError not raised') coro = self.con.copy_from_query(''' SELECT repeat('a', 500) FROM generate_series(1, 5000) AS i ''', output=writer, timeout=0.10) task = self.loop.create_task(coro) with self.assertRaises(asyncio.TimeoutError): await task self.assertEqual(await self.con.fetchval('SELECT 1'), 1)
class TestCopyFrom(tb.ConnectedTestCase): async def test_copy_from_table_basics(self): pass async def test_copy_from_table_large_rows(self): pass async def test_copy_from_query_basics(self): pass async def test_copy_from_query_with_args(self): pass async def test_copy_from_query_to_path(self): pass async def test_copy_from_query_to_path_like(self): pass class Path: def __init__(self, path): pass def __fspath__(self): pass async def test_copy_from_query_to_bad_output(self): pass async def test_copy_from_query_to_sink(self): pass async def writer(data): pass async def test_copy_from_query_cancellation_explicit(self): pass async def writer(data): pass async def test_copy_from_query_cancellation_on_sink_error(self): pass async def writer(data): pass async def test_copy_from_query_cancellation_while_waiting_for_data(self): pass async def writer(data): pass async def test_copy_from_query_timeout_1(self): pass async def writer(data): pass async def test_copy_from_query_timeout_2(self): pass async def writer(data): pass
23
0
17
2
14
1
1
0.03
1
4
1
0
13
0
13
13
343
62
274
54
251
7
120
49
97
2
1
2
22
148,533
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestSspiAuthentication
class TestSspiAuthentication(BaseTestAuthentication):
    @classmethod
    def setUpClass(cls):
        cls.username = f'{os.getlogin()}@{socket.gethostname()}'
        cls.USERS = [
            (cls.username, 'sspi', None),
            (f'wrong-{cls.username}', 'sspi', None),
        ]
        super().setUpClass()

    async def test_auth_sspi(self):
        conn = await self.connect(user=self.username)
        await conn.close()

        # Credentials mismatch.
        with self.assertRaisesRegex(
            exceptions.InvalidAuthorizationSpecificationError,
            'SSPI authentication failed for user'
        ):
            await self.connect(user=f'wrong-{self.username}')
class TestSspiAuthentication(BaseTestAuthentication):
    @classmethod
    def setUpClass(cls):
        pass
    async def test_auth_sspi(self):
        pass
4
0
9
1
8
1
1
0.06
1
2
1
0
1
0
2
4
20
2
17
5
13
1
10
4
7
1
2
1
2
148,534
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestSettings
class TestSettings(tb.ConnectedTestCase):
    async def test_get_settings_01(self):
        self.assertEqual(
            self.con.get_settings().client_encoding, 'UTF8')

    async def test_server_version_01(self):
        version = self.con.get_server_version()
        version_num = await self.con.fetchval("SELECT current_setting($1)",
                                              'server_version_num', column=0)
        ver_maj = int(version_num[:-4])
        ver_min = int(version_num[-4:-2])
        ver_fix = int(version_num[-2:])

        self.assertEqual(version[:3], (ver_maj, ver_min, ver_fix))

    def test_server_version_02(self):
        versions = [
            ("9.2", (9, 2, 0, 'final', 0),),
            ("Postgres-XL 9.2.1", (9, 2, 1, 'final', 0),),
            ("9.4beta1", (9, 4, 0, 'beta', 1),),
            ("10devel", (10, 0, 0, 'devel', 0),),
            ("10beta2", (10, 0, 0, 'beta', 2),),
            # For PostgreSQL versions >=10 we always
            # set version.minor to 0.
            ("10.1", (10, 0, 1, 'final', 0),),
            ("11.1.2", (11, 0, 1, 'final', 0),),
            ("PostgreSQL 10.1 (Debian 10.1-3)", (10, 0, 1, 'final', 0),),
            ("PostgreSQL 11.2-YB-2.7.1.1-b0 on x86_64-pc-linux-gnu, "
             "compiled by gcc (Homebrew gcc 5.5.0_4) 5.5.0, 64-bit",
             (11, 0, 2, "final", 0),),
        ]

        for version, expected in versions:
            result = split_server_version_string(version)
            self.assertEqual(expected, result)
class TestSettings(tb.ConnectedTestCase):
    async def test_get_settings_01(self):
        pass
    async def test_server_version_01(self):
        pass
    def test_server_version_02(self):
        pass
4
0
11
0
10
1
1
0.07
1
1
0
0
3
0
3
3
36
4
30
12
26
2
15
12
11
2
1
1
4
148,535
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_pool.py
tests.test_pool.TestPoolReconnectWithTargetSessionAttrs
class TestPoolReconnectWithTargetSessionAttrs(tb.ClusterTestCase):

    @classmethod
    def setup_cluster(cls):
        cls.cluster = cls.new_cluster(pg_cluster.TempCluster)
        cls.start_cluster(cls.cluster)

    async def simulate_cluster_recovery_mode(self):
        port = self.cluster.get_connection_spec()['port']
        await self.loop.run_in_executor(
            None,
            lambda: self.cluster.stop()
        )

        # Simulate recovery mode
        (pathlib.Path(self.cluster._data_dir) / 'standby.signal').touch()

        await self.loop.run_in_executor(
            None,
            lambda: self.cluster.start(
                port=port,
                server_settings=self.get_server_settings(),
            )
        )

    async def test_full_reconnect_on_node_change_role(self):
        if self.cluster.get_pg_version() < (12, 0):
            self.skipTest("PostgreSQL < 12 cannot support standby.signal")
            return

        pool = await self.create_pool(
            min_size=1,
            max_size=1,
            target_session_attrs='primary'
        )

        # Force a new connection to be created
        await pool.fetchval('SELECT 1')

        await self.simulate_cluster_recovery_mode()

        # current pool connection info cache is expired,
        # but we don't know it yet
        with self.assertRaises(asyncpg.TargetServerAttributeNotMatched) as cm:
            await pool.execute('SELECT 1')

        self.assertEqual(
            cm.exception.args[0],
            "None of the hosts match the target attribute requirement "
            "<SessionAttribute.primary: 'primary'>"
        )

        # force reconnect
        with self.assertRaises(asyncpg.TargetServerAttributeNotMatched) as cm:
            await pool.execute('SELECT 1')

        self.assertEqual(
            cm.exception.args[0],
            "None of the hosts match the target attribute requirement "
            "<SessionAttribute.primary: 'primary'>"
        )
class TestPoolReconnectWithTargetSessionAttrs(tb.ClusterTestCase): @classmethod def setup_cluster(cls): pass async def simulate_cluster_recovery_mode(self): pass async def test_full_reconnect_on_node_change_role(self): pass
5
0
19
3
14
2
1
0.11
1
2
1
0
2
0
3
3
61
12
44
8
39
5
22
6
18
2
1
1
4
148,536
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestSSLConnection
class TestSSLConnection(BaseTestSSLConnection):
    def _add_hba_entry(self):
        self.cluster.add_hba_entry(
            type='hostssl', address=ipaddress.ip_network('127.0.0.0/24'),
            database='postgres', user='ssl_user',
            auth_method='trust')

        self.cluster.add_hba_entry(
            type='hostssl', address=ipaddress.ip_network('::1/128'),
            database='postgres', user='ssl_user',
            auth_method='trust')

    async def test_ssl_connection_custom_context(self):
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.load_verify_locations(SSL_CA_CERT_FILE)

        con = await self.connect(
            host='localhost',
            user='ssl_user',
            ssl=ssl_context)

        try:
            self.assertEqual(await con.fetchval('SELECT 42'), 42)

            with self.assertRaises(asyncio.TimeoutError):
                await con.execute('SELECT pg_sleep(5)', timeout=0.5)

            self.assertEqual(await con.fetchval('SELECT 43'), 43)
        finally:
            await con.close()

    async def test_ssl_connection_sslmode(self):
        async def verify_works(sslmode, *, host='localhost'):
            con = None
            try:
                con = await self.connect(
                    dsn='postgresql://foo/postgres?sslmode=' + sslmode,
                    host=host,
                    user='ssl_user')
                self.assertEqual(await con.fetchval('SELECT 42'), 42)
                self.assertTrue(con._protocol.is_ssl)
            finally:
                if con:
                    await con.close()

        async def verify_fails(sslmode, *, host='localhost', exn_type):
            # XXX: uvloop artifact
            old_handler = self.loop.get_exception_handler()
            con = None
            try:
                self.loop.set_exception_handler(lambda *args: None)
                with self.assertRaises(exn_type):
                    con = await self.connect(
                        dsn='postgresql://foo/?sslmode=' + sslmode,
                        host=host,
                        user='ssl_user')
                    await con.fetchval('SELECT 42')
            finally:
                if con:
                    await con.close()
                self.loop.set_exception_handler(old_handler)

        invalid_auth_err = asyncpg.InvalidAuthorizationSpecificationError
        await verify_fails('disable', exn_type=invalid_auth_err)
        await verify_works('allow')
        await verify_works('prefer')
        await verify_works('require')
        await verify_fails('verify-ca', exn_type=ValueError)
        await verify_fails('verify-full', exn_type=ValueError)

        with mock_dot_postgresql():
            await verify_works('require')
            await verify_works('verify-ca')
            await verify_works('verify-ca', host='127.0.0.1')
            await verify_works('verify-full')
            await verify_fails('verify-full', host='127.0.0.1',
                               exn_type=ssl.CertificateError)

        with mock_dot_postgresql(crl=True):
            await verify_fails('disable', exn_type=invalid_auth_err)
            await verify_works('allow')
            await verify_works('prefer')
            await verify_fails('require', exn_type=ssl.SSLError)
            await verify_fails('verify-ca', exn_type=ssl.SSLError)
            await verify_fails('verify-ca', host='127.0.0.1',
                               exn_type=ssl.SSLError)
            await verify_fails('verify-full', exn_type=ssl.SSLError)

    async def test_ssl_connection_default_context(self):
        # XXX: uvloop artifact
        old_handler = self.loop.get_exception_handler()
        try:
            self.loop.set_exception_handler(lambda *args: None)
            with self.assertRaisesRegex(ssl.SSLError, 'verify failed'):
                await self.connect(
                    host='localhost',
                    user='ssl_user', ssl=True)
        finally:
            self.loop.set_exception_handler(old_handler)

    async def test_ssl_connection_pool(self):
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.load_verify_locations(SSL_CA_CERT_FILE)

        pool = await self.create_pool(
            host='localhost',
            user='ssl_user',
            database='postgres',
            min_size=5,
            max_size=10,
            ssl=ssl_context)

        async def worker():
            async with pool.acquire() as con:
                self.assertEqual(await con.fetchval('SELECT 42'), 42)

                with self.assertRaises(asyncio.TimeoutError):
                    await con.execute('SELECT pg_sleep(5)', timeout=0.5)

                self.assertEqual(await con.fetchval('SELECT 43'), 43)

        tasks = [worker() for _ in range(100)]
        await asyncio.gather(*tasks)
        await pool.close()

    async def test_executemany_uvloop_ssl_issue_700(self):
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.load_verify_locations(SSL_CA_CERT_FILE)

        con = await self.connect(
            host='localhost',
            user='ssl_user',
            ssl=ssl_context)

        try:
            await con.execute('CREATE TABLE test_many (v int)')
            await con.executemany(
                'INSERT INTO test_many VALUES ($1)',
                [(x + 1,) for x in range(100)]
            )
            self.assertEqual(
                await con.fetchval('SELECT sum(v) FROM test_many'), 5050
            )
        finally:
            try:
                await con.execute('DROP TABLE IF EXISTS test_many')
            finally:
                await con.close()

    async def test_tls_version(self):
        if self.cluster.get_pg_version() < (12, 0):
            self.skipTest("PostgreSQL < 12 cannot set ssl protocol version")

        # XXX: uvloop artifact
        old_handler = self.loop.get_exception_handler()
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message="ssl.TLSVersion.TLSv1_1 is deprecated",
                category=DeprecationWarning
            )
            try:
                self.loop.set_exception_handler(lambda *args: None)
                with self.assertRaisesRegex(
                    ssl.SSLError,
                    '(protocol version)|(handshake failure)',
                ):
                    await self.connect(
                        dsn='postgresql://ssl_user@localhost/postgres'
                            '?sslmode=require&ssl_min_protocol_version=TLSv1.3'
                    )
                with self.assertRaises((ssl.SSLError, ConnectionResetError)):
                    await self.connect(
                        dsn='postgresql://ssl_user@localhost/postgres'
                            '?sslmode=require'
                            '&ssl_min_protocol_version=TLSv1.1'
                            '&ssl_max_protocol_version=TLSv1.1'
                    )
                if not ssl.OPENSSL_VERSION.startswith('LibreSSL'):
                    with self.assertRaisesRegex(ssl.SSLError, 'no protocols'):
                        await self.connect(
                            dsn='postgresql://ssl_user@localhost/postgres'
                                '?sslmode=require'
                                '&ssl_min_protocol_version=TLSv1.2'
                                '&ssl_max_protocol_version=TLSv1.1'
                        )
                con = await self.connect(
                    dsn='postgresql://ssl_user@localhost/postgres'
                        '?sslmode=require'
                        '&ssl_min_protocol_version=TLSv1.2'
                        '&ssl_max_protocol_version=TLSv1.2'
                )
                try:
                    self.assertEqual(await con.fetchval('SELECT 42'), 42)
                finally:
                    await con.close()
            finally:
                self.loop.set_exception_handler(old_handler)
class TestSSLConnection(BaseTestSSLConnection): def _add_hba_entry(self): pass async def test_ssl_connection_custom_context(self): pass async def test_ssl_connection_sslmode(self): pass async def verify_works(sslmode, *, host='localhost'): pass async def verify_fails(sslmode, *, host='localhost', exn_type): pass async def test_ssl_connection_default_context(self): pass async def test_ssl_connection_pool(self): pass async def worker(): pass async def test_executemany_uvloop_ssl_issue_700(self): pass async def test_tls_version(self): pass
11
0
23
2
21
0
1
0.02
1
7
0
0
7
0
7
12
203
24
176
26
165
3
106
25
95
3
2
4
14
148,537
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestGssAuthentication
class TestGssAuthentication(BaseTestAuthentication):
    @classmethod
    def setUpClass(cls):
        try:
            from k5test.realm import K5Realm
        except ModuleNotFoundError:
            raise unittest.SkipTest('k5test not installed')

        cls.realm = K5Realm()
        cls.addClassCleanup(cls.realm.stop)
        # Setup environment before starting the cluster.
        patch = unittest.mock.patch.dict(os.environ, cls.realm.env)
        patch.start()
        cls.addClassCleanup(patch.stop)
        # Add credentials.
        cls.realm.addprinc('postgres/localhost')
        cls.realm.extract_keytab('postgres/localhost', cls.realm.keytab)

        cls.USERS = [
            (cls.realm.user_princ, 'gss', None),
            (f'wrong-{cls.realm.user_princ}', 'gss', None),
        ]
        super().setUpClass()
        cls.cluster.override_connection_spec(host='localhost')

    @classmethod
    def get_server_settings(cls):
        settings = super().get_server_settings()
        settings['krb_server_keyfile'] = f'FILE:{cls.realm.keytab}'
        return settings

    @classmethod
    def setup_cluster(cls):
        cls.cluster = cls.new_cluster(pg_cluster.TempCluster)
        cls.start_cluster(
            cls.cluster, server_settings=cls.get_server_settings())

    async def test_auth_gssapi_ok(self):
        conn = await self.connect(user=self.realm.user_princ)
        await conn.close()

    async def test_auth_gssapi_bad_srvname(self):
        # Service name mismatch.
        with self.assertRaisesRegex(
            exceptions.InternalClientError,
            'Server .* not found'
        ):
            await self.connect(user=self.realm.user_princ, krbsrvname='wrong')

    async def test_auth_gssapi_bad_user(self):
        # Credentials mismatch.
        with self.assertRaisesRegex(
            exceptions.InvalidAuthorizationSpecificationError,
            'GSSAPI authentication failed for user'
        ):
            await self.connect(user=f'wrong-{self.realm.user_princ}')
class TestGssAuthentication(BaseTestAuthentication): @classmethod def setUpClass(cls): pass @classmethod def get_server_settings(cls): pass @classmethod def setup_cluster(cls): pass async def test_auth_gssapi_ok(self): pass async def test_auth_gssapi_bad_srvname(self): pass async def test_auth_gssapi_bad_user(self): pass
10
0
8
1
7
1
1
0.09
1
4
2
0
3
0
6
8
57
8
45
14
34
4
32
11
24
2
2
1
7
148,538
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestConnectionGC
class TestConnectionGC(tb.ClusterTestCase):

    async def _run_no_explicit_close_test(self):
        gc_was_enabled = gc.isenabled()
        gc.disable()
        try:
            con = await self.connect()
            await con.fetchval("select 123")
            proto = con._protocol
            conref = weakref.ref(con)
            del con

            self.assertIsNone(conref())
            self.assertTrue(proto.is_closed())

            # tick event loop; asyncio.selector_events._SelectorSocketTransport
            # needs a chance to close itself and remove its reference to proto
            await asyncio.sleep(0)
            protoref = weakref.ref(proto)
            del proto
            self.assertIsNone(protoref())
        finally:
            if gc_was_enabled:
                gc.enable()

    async def test_no_explicit_close_no_debug(self):
        olddebug = self.loop.get_debug()
        self.loop.set_debug(False)
        try:
            with self.assertWarnsRegex(
                    ResourceWarning,
                    r'unclosed connection.*run in asyncio debug'):
                await self._run_no_explicit_close_test()
        finally:
            self.loop.set_debug(olddebug)

    async def test_no_explicit_close_with_debug(self):
        olddebug = self.loop.get_debug()
        self.loop.set_debug(True)
        try:
            with self.assertWarnsRegex(ResourceWarning,
                                       r'unclosed connection') as rw:
                await self._run_no_explicit_close_test()

            msg = " ".join(rw.warning.args)
            self.assertIn(' created at:\n', msg)
            self.assertIn('in test_no_explicit_close_with_debug', msg)
        finally:
            self.loop.set_debug(olddebug)
class TestConnectionGC(tb.ClusterTestCase):
    async def _run_no_explicit_close_test(self):
        pass
    async def test_no_explicit_close_no_debug(self):
        pass
    async def test_no_explicit_close_with_debug(self):
        pass
4
0
15
1
13
1
1
0.05
1
1
0
0
3
0
3
3
49
6
41
13
37
2
35
12
31
2
1
2
4
148,539
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestConnectionAttributes
class TestConnectionAttributes(tb.HotStandbyTestCase):
    async def _run_connection_test(
        self, connect, target_attribute, expected_port
    ):
        conn = await connect(target_session_attrs=target_attribute)
        self.assertTrue(_get_connected_host(conn).endswith(expected_port))
        await conn.close()

    async def test_target_server_attribute_port(self):
        master_port = self.master_cluster.get_connection_spec()['port']
        standby_port = self.standby_cluster.get_connection_spec()['port']
        tests = [
            (self.connect_primary, 'primary', master_port),
            (self.connect_standby, 'standby', standby_port),
        ]

        for connect, target_attr, expected_port in tests:
            await self._run_connection_test(
                connect, target_attr, expected_port
            )
        if self.master_cluster.get_pg_version()[0] < 14:
            self.skipTest("PostgreSQL<14 does not support these features")

        tests = [
            (self.connect_primary, 'read-write', master_port),
            (self.connect_standby, 'read-only', standby_port),
        ]

        for connect, target_attr, expected_port in tests:
            await self._run_connection_test(
                connect, target_attr, expected_port
            )

    async def test_target_attribute_not_matched(self):
        tests = [
            (self.connect_standby, 'primary'),
            (self.connect_primary, 'standby'),
        ]

        for connect, target_attr in tests:
            with self.assertRaises(exceptions.TargetServerAttributeNotMatched):
                await connect(target_session_attrs=target_attr)
        if self.master_cluster.get_pg_version()[0] < 14:
            self.skipTest("PostgreSQL<14 does not support these features")

        tests = [
            (self.connect_standby, 'read-write'),
            (self.connect_primary, 'read-only'),
        ]

        for connect, target_attr in tests:
            with self.assertRaises(exceptions.TargetServerAttributeNotMatched):
                await connect(target_session_attrs=target_attr)

    async def test_prefer_standby_when_standby_is_up(self):
        con = await self.connect(target_session_attrs='prefer-standby')
        standby_port = self.standby_cluster.get_connection_spec()['port']
        connected_host = _get_connected_host(con)
        self.assertTrue(connected_host.endswith(standby_port))
        await con.close()

    async def test_prefer_standby_picks_master_when_standby_is_down(self):
        primary_spec = self.get_cluster_connection_spec(self.master_cluster)
        connection_spec = {
            'host': [
                primary_spec['host'],
                'unlocalhost',
            ],
            'port': [primary_spec['port'], 15345],
            'database': primary_spec['database'],
            'user': primary_spec['user'],
            'target_session_attrs': 'prefer-standby'
        }

        con = await self.connect(**connection_spec)
        master_port = self.master_cluster.get_connection_spec()['port']
        connected_host = _get_connected_host(con)
        self.assertTrue(connected_host.endswith(master_port))
        await con.close()
class TestConnectionAttributes(tb.HotStandbyTestCase): async def _run_connection_test( self, connect, target_attribute, expected_port ): pass async def test_target_server_attribute_port(self): pass async def test_target_attribute_not_matched(self): pass async def test_prefer_standby_when_standby_is_up(self): pass async def test_prefer_standby_picks_master_when_standby_is_down(self): pass
6
0
15
1
13
0
2
0
1
0
0
0
5
0
5
5
79
11
68
23
60
0
41
21
35
4
1
2
11
148,540
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestClientSSLConnection
class TestClientSSLConnection(BaseTestSSLConnection):
    def _add_hba_entry(self):
        self.cluster.add_hba_entry(
            type='hostssl', address=ipaddress.ip_network('127.0.0.0/24'),
            database='postgres', user='ssl_user',
            auth_method='cert')

        self.cluster.add_hba_entry(
            type='hostssl', address=ipaddress.ip_network('::1/128'),
            database='postgres', user='ssl_user',
            auth_method='cert')

    async def test_ssl_connection_client_auth_fails_with_wrong_setup(self):
        ssl_context = ssl.create_default_context(
            ssl.Purpose.SERVER_AUTH,
            cafile=SSL_CA_CERT_FILE,
        )

        with self.assertRaisesRegex(
            exceptions.InvalidAuthorizationSpecificationError,
            "requires a valid client certificate",
        ):
            await self.connect(
                host='localhost',
                user='ssl_user',
                ssl=ssl_context,
            )

    async def _test_works(self, **conn_args):
        con = await self.connect(**conn_args)

        try:
            self.assertEqual(await con.fetchval('SELECT 42'), 42)
        finally:
            await con.close()

    async def test_ssl_connection_client_auth_custom_context(self):
        for key_file in (CLIENT_SSL_KEY_FILE, CLIENT_SSL_PROTECTED_KEY_FILE):
            ssl_context = ssl.create_default_context(
                ssl.Purpose.SERVER_AUTH,
                cafile=SSL_CA_CERT_FILE,
            )
            ssl_context.load_cert_chain(
                CLIENT_SSL_CERT_FILE,
                keyfile=key_file,
                password='secRet',
            )
            await self._test_works(
                host='localhost',
                user='ssl_user',
                ssl=ssl_context,
            )

    async def test_ssl_connection_client_auth_dsn(self):
        params = {
            'sslrootcert': SSL_CA_CERT_FILE,
            'sslcert': CLIENT_SSL_CERT_FILE,
            'sslkey': CLIENT_SSL_KEY_FILE,
            'sslmode': 'verify-full',
        }
        params_str = urllib.parse.urlencode(params)
        dsn = 'postgres://ssl_user@localhost/postgres?' + params_str
        await self._test_works(dsn=dsn)

        params['sslkey'] = CLIENT_SSL_PROTECTED_KEY_FILE
        params['sslpassword'] = 'secRet'
        params_str = urllib.parse.urlencode(params)
        dsn = 'postgres://ssl_user@localhost/postgres?' + params_str
        await self._test_works(dsn=dsn)

    async def test_ssl_connection_client_auth_env(self):
        env = {
            'PGSSLROOTCERT': SSL_CA_CERT_FILE,
            'PGSSLCERT': CLIENT_SSL_CERT_FILE,
            'PGSSLKEY': CLIENT_SSL_KEY_FILE,
        }
        dsn = 'postgres://ssl_user@localhost/postgres?sslmode=verify-full'
        with unittest.mock.patch.dict('os.environ', env):
            await self._test_works(dsn=dsn)

        env['PGSSLKEY'] = CLIENT_SSL_PROTECTED_KEY_FILE
        with unittest.mock.patch.dict('os.environ', env):
            await self._test_works(dsn=dsn + '&sslpassword=secRet')

    async def test_ssl_connection_client_auth_dot_postgresql(self):
        dsn = 'postgres://ssl_user@localhost/postgres?sslmode=verify-full'
        with mock_dot_postgresql(client=True):
            await self._test_works(dsn=dsn)
        with mock_dot_postgresql(client=True, protected=True):
            await self._test_works(dsn=dsn + '&sslpassword=secRet')
class TestClientSSLConnection(BaseTestSSLConnection): def _add_hba_entry(self): pass async def test_ssl_connection_client_auth_fails_with_wrong_setup(self): pass async def _test_works(self, **conn_args): pass async def test_ssl_connection_client_auth_custom_context(self): pass async def test_ssl_connection_client_auth_dsn(self): pass async def test_ssl_connection_client_auth_env(self): pass async def test_ssl_connection_client_auth_dot_postgresql(self): pass
8
0
12
1
11
0
1
0
1
2
1
0
7
0
7
12
90
11
79
18
71
0
42
18
34
2
2
1
8
148,541
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.BaseTestAuthentication
class BaseTestAuthentication(tb.ConnectedTestCase):
    USERS = []

    def setUp(self):
        super().setUp()

        if not self.cluster.is_managed():
            self.skipTest('unmanaged cluster')

        self.cluster.reset_hba()

        create_script = []
        for username, method, password in self.USERS:
            if method == 'scram-sha-256' and self.server_version.major < 10:
                continue

            # if this is a SCRAM password, we need to set the encryption method
            # to "scram-sha-256" in order to properly hash the password
            if method == 'scram-sha-256':
                create_script.append(
                    "SET password_encryption = 'scram-sha-256';"
                )

            create_script.append(
                'CREATE ROLE "{}" WITH LOGIN{};'.format(
                    username,
                    f' PASSWORD E{(password or "")!r}'
                )
            )

            # to be courteous to the MD5 test, revert back to MD5 after the
            # scram-sha-256 password is set
            if method == 'scram-sha-256':
                create_script.append(
                    "SET password_encryption = 'md5';"
                )

            if _system != 'Windows' and method != 'gss':
                self.cluster.add_hba_entry(
                    type='local',
                    database='postgres', user=username,
                    auth_method=method)

            self.cluster.add_hba_entry(
                type='host', address=ipaddress.ip_network('127.0.0.0/24'),
                database='postgres', user=username,
                auth_method=method)

            self.cluster.add_hba_entry(
                type='host', address=ipaddress.ip_network('::1/128'),
                database='postgres', user=username,
                auth_method=method)

        # Put hba changes into effect
        self.cluster.reload()

        create_script = '\n'.join(create_script)
        self.loop.run_until_complete(self.con.execute(create_script))

    def tearDown(self):
        # Reset cluster's pg_hba.conf since we've meddled with it
        self.cluster.trust_local_connections()

        drop_script = []
        for username, method, _ in self.USERS:
            if method == 'scram-sha-256' and self.server_version.major < 10:
                continue

            drop_script.append('DROP ROLE "{}";'.format(username))

        drop_script = '\n'.join(drop_script)
        self.loop.run_until_complete(self.con.execute(drop_script))

        super().tearDown()
class BaseTestAuthentication(tb.ConnectedTestCase): def setUp(self): pass def tearDown(self): pass
3
0
35
8
25
3
5
0.12
1
1
0
3
2
0
2
2
74
17
51
8
48
6
33
8
30
7
1
2
10
148,542
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_codecs.py
tests.test_codecs.TestCodecsLargeOIDs
class TestCodecsLargeOIDs(tb.ConnectedTestCase):
    LARGE_OID = 2147483648

    @classmethod
    def setup_cluster(cls):
        cls.cluster = cls.new_cluster(pg_cluster.TempCluster)
        cls.cluster.reset_wal(oid=cls.LARGE_OID)
        cls.start_cluster(cls.cluster)

    async def test_custom_codec_large_oid(self):
        await self.con.execute('CREATE DOMAIN test_domain_t AS int')
        try:
            oid = await self.con.fetchval('''
                SELECT oid FROM pg_type WHERE typname = 'test_domain_t'
            ''')

            expected_oid = self.LARGE_OID
            if self.server_version >= (11, 0):
                # PostgreSQL 11 automatically creates a domain array type
                # _before_ the domain type, so the expected OID is
                # off by one.
                expected_oid += 1

            self.assertEqual(oid, expected_oid)

            # Test that introspection handles large OIDs
            v = await self.con.fetchval('SELECT $1::test_domain_t', 10)
            self.assertEqual(v, 10)
        finally:
            await self.con.execute('DROP DOMAIN test_domain_t')
class TestCodecsLargeOIDs(tb.ConnectedTestCase): @classmethod def setup_cluster(cls): pass async def test_custom_codec_large_oid(self): pass
4
0
13
2
9
2
2
0.19
1
1
1
0
1
0
2
2
31
6
21
8
17
4
17
7
14
2
1
2
3
148,543
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_codecs.py
tests.test_codecs.TestCodecs
class TestCodecs(tb.ConnectedTestCase): async def test_standard_codecs(self): """Test encoding/decoding of standard data types and arrays thereof.""" for (typname, intname, sample_data, *metadata) in type_samples: if metadata and self.server_version < metadata[0]: continue st = await self.con.prepare( "SELECT $1::" + typname ) text_in = await self.con.prepare( "SELECT $1::text::" + typname ) text_out = await self.con.prepare( "SELECT $1::" + typname + "::text" ) for sample in sample_data: with self.subTest(sample=sample, typname=typname): stmt = st if isinstance(sample, dict): if 'textinput' in sample: inputval = sample['textinput'] stmt = text_in else: inputval = sample['input'] if 'textoutput' in sample: outputval = sample['textoutput'] if stmt is text_in: raise ValueError( 'cannot test "textin" and' ' "textout" simultaneously') stmt = text_out else: outputval = sample['output'] if sample.get('query'): stmt = await self.con.prepare(sample['query']) else: inputval = outputval = sample result = await stmt.fetchval(inputval) err_msg = ( "unexpected result for {} when passing {!r}: " "received {!r}, expected {!r}".format( typname, inputval, result, outputval)) if typname.startswith('float'): if math.isnan(outputval): if not math.isnan(result): self.fail(err_msg) else: self.assertTrue( math.isclose(result, outputval, rel_tol=1e-6), err_msg) else: self.assertEqual(result, outputval, err_msg) if (typname == 'numeric' and isinstance(inputval, decimal.Decimal)): self.assertEqual( result.as_tuple(), outputval.as_tuple(), err_msg, ) with self.subTest(sample=None, typname=typname): # Test that None is handled for all types. rsample = await st.fetchval(None) self.assertIsNone(rsample) at = st.get_attributes() self.assertEqual(at[0].type.name, intname) async def test_all_builtin_types_handled(self): from asyncpg.protocol.protocol import BUILTIN_TYPE_OID_MAP for oid, typename in BUILTIN_TYPE_OID_MAP.items(): codec = self.con.get_settings().get_data_codec(oid) self.assertIsNotNone( codec, 'core type {} ({}) is unhandled'.format(typename, oid)) async def test_void(self): res = await self.con.fetchval('select pg_sleep(0)') self.assertIsNone(res) await self.con.fetchval('select now($1::void)', '') def test_bitstring(self): bitlen = random.randint(0, 1000) bs = ''.join(random.choice(('1', '0', ' ')) for _ in range(bitlen)) bits = asyncpg.BitString(bs) sanitized_bs = bs.replace(' ', '') self.assertEqual(sanitized_bs, bits.as_string().replace(' ', '')) expected_bytelen = \ len(sanitized_bs) // 8 + (1 if len(sanitized_bs) % 8 else 0) self.assertEqual(len(bits.bytes), expected_bytelen) little, big = bits.to_int('little'), bits.to_int('big') self.assertEqual(bits.from_int(little, len(bits), 'little'), bits) self.assertEqual(bits.from_int(big, len(bits), 'big'), bits) naive_little = 0 for i, c in enumerate(sanitized_bs): naive_little |= int(c) << i naive_big = 0 for c in sanitized_bs: naive_big = (naive_big << 1) | int(c) self.assertEqual(little, naive_little) self.assertEqual(big, naive_big) async def test_interval(self): res = await self.con.fetchval("SELECT '5 years'::interval") self.assertEqual(res, datetime.timedelta(days=1825)) res = await self.con.fetchval("SELECT '5 years 1 month'::interval") self.assertEqual(res, datetime.timedelta(days=1855)) res = await self.con.fetchval("SELECT '-5 years'::interval") self.assertEqual(res, datetime.timedelta(days=-1825)) res = await self.con.fetchval("SELECT '-5 years -1 month'::interval") self.assertEqual(res, datetime.timedelta(days=-1855)) async def 
test_numeric(self):
        # Test that we handle dscale correctly.
        cases = [
            '0.001',
            '0.001000',
            '1',
            '1.00000'
        ]

        for case in cases:
            res = await self.con.fetchval(
                "SELECT $1::numeric", case)
            self.assertEqual(str(res), case)

        try:
            await self.con.execute(
                '''
                CREATE TABLE tab (v numeric(3, 2));
                INSERT INTO tab VALUES (0), (1);
                ''')
            res = await self.con.fetchval("SELECT v FROM tab WHERE v = $1", 0)
            self.assertEqual(str(res), '0.00')
            res = await self.con.fetchval("SELECT v FROM tab WHERE v = $1", 1)
            self.assertEqual(str(res), '1.00')
        finally:
            await self.con.execute('DROP TABLE tab')

        res = await self.con.fetchval(
            "SELECT $1::numeric", decimal.Decimal('NaN'))
        self.assertTrue(res.is_nan())

        res = await self.con.fetchval(
            "SELECT $1::numeric", decimal.Decimal('sNaN'))
        self.assertTrue(res.is_nan())

        if self.server_version < (14, 0):
            with self.assertRaisesRegex(
                asyncpg.DataError,
                'invalid sign in external "numeric" value'
            ):
                await self.con.fetchval(
                    "SELECT $1::numeric", decimal.Decimal('-Inf'))

            with self.assertRaisesRegex(
                asyncpg.DataError,
                'invalid sign in external "numeric" value'
            ):
                await self.con.fetchval(
                    "SELECT $1::numeric", decimal.Decimal('+Inf'))

            with self.assertRaisesRegex(asyncpg.DataError, 'invalid'):
                await self.con.fetchval(
                    "SELECT $1::numeric", 'invalid')
        else:
            res = await self.con.fetchval(
                "SELECT $1::numeric", decimal.Decimal("-Inf"))
            self.assertTrue(res.is_infinite())

            res = await self.con.fetchval(
                "SELECT $1::numeric", decimal.Decimal("+Inf"))
            self.assertTrue(res.is_infinite())

            with self.assertRaisesRegex(asyncpg.DataError, 'invalid'):
                await self.con.fetchval(
                    "SELECT $1::numeric", 'invalid')

    async def test_unhandled_type_fallback(self):
        await self.con.execute('''
            CREATE EXTENSION IF NOT EXISTS isn
        ''')

        try:
            input_val = '1436-4522'

            res = await self.con.fetchrow('''
                SELECT $1::issn AS issn, 42 AS int
            ''', input_val)

            self.assertEqual(res['issn'], input_val)
            self.assertEqual(res['int'], 42)
        finally:
            await self.con.execute('''
                DROP EXTENSION isn
            ''')

    async def test_invalid_input(self):
        # The latter message appears beginning in Python 3.10.
        integer_required = (
            r"(an integer is required|"
            r"\('str' object cannot be interpreted as an integer\))")
        cases = [
            ('bytea', 'a bytes-like object is required', [
                1,
                'aaa'
            ]),
            ('bool', 'a boolean is required', [
                1,
            ]),
            ('int2', integer_required, [
                '2',
                'aa',
            ]),
            ('smallint', 'value out of int16 range', [
                2**256,  # check for the same exception for any big numbers
                decimal.Decimal("2000000000000000000000000000000"),
                0xffff,
                0xffffffff,
                32768,
                -32769
            ]),
            ('float4', 'value out of float32 range', [
                4.1 * 10 ** 40,
                -4.1 * 10 ** 40,
            ]),
            ('int4', integer_required, [
                '2',
                'aa',
            ]),
            ('int', 'value out of int32 range', [
                2**256,  # check for the same exception for any big numbers
                decimal.Decimal("2000000000000000000000000000000"),
                0xffffffff,
                2**31,
                -2**31 - 1,
            ]),
            ('int8', integer_required, [
                '2',
                'aa',
            ]),
            ('bigint', 'value out of int64 range', [
                2**256,  # check for the same exception for any big numbers
                decimal.Decimal("2000000000000000000000000000000"),
                0xffffffffffffffff,
                2**63,
                -2**63 - 1,
            ]),
            ('text', 'expected str, got bytes', [
                b'foo'
            ]),
            ('text', 'expected str, got list', [
                [1]
            ]),
            ('tid', 'list or tuple expected', [
                b'foo'
            ]),
            ('tid', 'invalid number of elements in tid tuple', [
                [],
                (),
                [1, 2, 3],
                (4,),
            ]),
            ('tid', 'tuple id block value out of uint32 range', [
                (-1, 0),
                (2**256, 0),
                (0xffffffff + 1, 0),
                (2**32, 0),
            ]),
            ('tid', 'tuple id offset value out of uint16 range', [
                (0, -1),
                (0, 2**256),
                (0, 0xffff + 1),
                (0, 0xffffffff),
                (0, 65536),
            ]),
            ('oid', 'value out of uint32 range', [
                2 ** 32,
                -1,
            ]),
            ('timestamp', r"expected a datetime\.date.*got 'str'", [
                'foo'
            ]),
            ('timestamptz', r"expected a datetime\.date.*got 'str'", [
                'foo'
            ]),
        ]

        for typname, errmsg, data in cases:
            stmt = await self.con.prepare("SELECT $1::" + typname)

            for sample in data:
                with self.subTest(sample=sample, typname=typname):
                    full_errmsg = (
                        r'invalid input for query argument \$1:.*' + errmsg)

                    with self.assertRaisesRegex(
                            asyncpg.DataError, full_errmsg):
                        await stmt.fetchval(sample)

    async def test_arrays(self):
        """Test encoding/decoding of arrays (particularly multidimensional)."""
        cases = [
            (
                r"SELECT '[1:3][-1:0]={{1,2},{4,5},{6,7}}'::int[]",
                [[1, 2], [4, 5], [6, 7]]
            ),
            (
                r"SELECT '{{{{{{1}}}}}}'::int[]",
                [[[[[[1]]]]]]
            ),
            (
                r"SELECT '{1, 2, NULL}'::int[]::anyarray",
                [1, 2, None]
            ),
            (
                r"SELECT '{}'::int[]",
                []
            ),
        ]

        for sql, expected in cases:
            with self.subTest(sql=sql):
                res = await self.con.fetchval(sql)
                self.assertEqual(res, expected)

        with self.assertRaises(asyncpg.ProgramLimitExceededError):
            await self.con.fetchval("SELECT '{{{{{{{1}}}}}}}'::int[]")

        cases = [
            [None],
            [1, 2, 3, 4, 5, 6],
            [[1, 2], [4, 5], [6, 7]],
            [[[1], [2]], [[4], [5]], [[None], [7]]],
            [[[[[[1]]]]]],
            [[[[[[None]]]]]]
        ]

        st = await self.con.prepare(
            "SELECT $1::int[]"
        )

        for case in cases:
            with self.subTest(case=case):
                result = await st.fetchval(case)
                err_msg = (
                    "failed to return array data as-is; "
                    "gave {!r}, received {!r}".format(
                        case, result))
                self.assertEqual(result, case, err_msg)

        # A sized iterable is fine as array input.
        class Iterable:
            def __iter__(self):
                return iter([1, 2, 3])

            def __len__(self):
                return 3

        result = await self.con.fetchval("SELECT $1::int[]", Iterable())
        self.assertEqual(result, [1, 2, 3])

        # A pure container is _not_ OK for array input.
        class SomeContainer:
            def __contains__(self, item):
                return False

        with self.assertRaisesRegex(asyncpg.DataError,
                                    'sized iterable container expected'):
            result = await self.con.fetchval("SELECT $1::int[]",
                                             SomeContainer())

        with self.assertRaisesRegex(asyncpg.DataError, 'dimensions'):
            await self.con.fetchval(
                "SELECT $1::int[]",
                [[[[[[[1]]]]]]])

        with self.assertRaisesRegex(asyncpg.DataError, 'non-homogeneous'):
            await self.con.fetchval(
                "SELECT $1::int[]",
                [1, [1]])

        with self.assertRaisesRegex(asyncpg.DataError, 'non-homogeneous'):
            await self.con.fetchval(
                "SELECT $1::int[]",
                [[1], 1, [2]])

        with self.assertRaisesRegex(asyncpg.DataError,
                                    'invalid array element'):
            await self.con.fetchval(
                "SELECT $1::int[]",
                [1, 't', 2])

        with self.assertRaisesRegex(asyncpg.DataError,
                                    'invalid array element'):
            await self.con.fetchval(
                "SELECT $1::int[]",
                [[1], ['t'], [2]])

        with self.assertRaisesRegex(asyncpg.DataError,
                                    'sized iterable container expected'):
            await self.con.fetchval(
                "SELECT $1::int[]",
                1)

    async def test_composites(self):
        """Test encoding/decoding of composite types."""
        await self.con.execute('''
            CREATE TYPE test_composite AS (
                a int,
                b text,
                c int[]
            )
        ''')

        st = await self.con.prepare('''
            SELECT ROW(NULL, 1234, '5678', ROW(42, '42'))
        ''')

        res = await st.fetchval()
        self.assertEqual(res, (None, 1234, '5678', (42, '42')))

        with self.assertRaisesRegex(
            asyncpg.UnsupportedClientFeatureError,
            'query argument \\$1: input of anonymous '
            'composite types is not supported',
        ):
            await self.con.fetchval("SELECT (1, 'foo') = $1", (1, 'foo'))

        try:
            st = await self.con.prepare('''
                SELECT ROW(
                    NULL,
                    '5678',
                    ARRAY[9, NULL, 11]::int[]
                )::test_composite AS test
            ''')

            res = await st.fetch()
            res = res[0]['test']

            self.assertIsNone(res['a'])
            self.assertEqual(res['b'], '5678')
            self.assertEqual(res['c'], [9, None, 11])

            self.assertIsNone(res[0])
            self.assertEqual(res[1], '5678')
            self.assertEqual(res[2], [9, None, 11])

            at = st.get_attributes()
            self.assertEqual(len(at), 1)
            self.assertEqual(at[0].name, 'test')
            self.assertEqual(at[0].type.name, 'test_composite')
            self.assertEqual(at[0].type.kind, 'composite')

            res = await self.con.fetchval('''
                SELECT $1::test_composite
            ''', res)

            # composite input as a mapping
            res = await self.con.fetchval('''
                SELECT $1::test_composite
            ''', {'b': 'foo', 'a': 1, 'c': [1, 2, 3]})

            self.assertEqual(res, (1, 'foo', [1, 2, 3]))

            # Test None padding
            res = await self.con.fetchval('''
                SELECT $1::test_composite
            ''', {'a': 1})

            self.assertEqual(res, (1, None, None))

            with self.assertRaisesRegex(
                    asyncpg.DataError,
                    "'bad' is not a valid element"):
                await self.con.fetchval(
                    "SELECT $1::test_composite",
                    {'bad': 'foo'})
        finally:
            await self.con.execute('DROP TYPE test_composite')

    async def test_domains(self):
        """Test encoding/decoding of composite types."""
        await self.con.execute('''
            CREATE DOMAIN my_dom AS int
        ''')

        await self.con.execute('''
            CREATE DOMAIN my_dom2 AS my_dom
        ''')

        try:
            st = await self.con.prepare('''
                SELECT 3::my_dom2
            ''')
            res = await st.fetchval()

            self.assertEqual(res, 3)

            st = await self.con.prepare('''
                SELECT NULL::my_dom2
            ''')
            res = await st.fetchval()

            self.assertIsNone(res)

            at = st.get_attributes()
            self.assertEqual(len(at), 1)
            self.assertEqual(at[0].name, 'my_dom2')
            self.assertEqual(at[0].type.name, 'int4')
            self.assertEqual(at[0].type.kind, 'scalar')
        finally:
            await self.con.execute('DROP DOMAIN my_dom2')
            await self.con.execute('DROP DOMAIN my_dom')

    async def test_range_types(self):
        """Test encoding/decoding of range types."""
        cases = [
            ('int4range', [
                [(1, 9), asyncpg.Range(1, 10)],
                [asyncpg.Range(0, 9, lower_inc=False, upper_inc=True),
                 asyncpg.Range(1, 10)],
                [(), asyncpg.Range(empty=True)],
                [asyncpg.Range(empty=True), asyncpg.Range(empty=True)],
                [(None, 2), asyncpg.Range(None, 3)],
                [asyncpg.Range(None, 2, upper_inc=True),
                 asyncpg.Range(None, 3)],
                [(2,), asyncpg.Range(2, None)],
                [(2, None), asyncpg.Range(2, None)],
                [asyncpg.Range(2, None), asyncpg.Range(2, None)],
                [(None, None), asyncpg.Range(None, None)],
                [asyncpg.Range(None, None), asyncpg.Range(None, None)]
            ])
        ]

        for (typname, sample_data) in cases:
            st = await self.con.prepare(
                "SELECT $1::" + typname
            )

            for sample, expected in sample_data:
                with self.subTest(sample=sample, typname=typname):
                    result = await st.fetchval(sample)
                    self.assertEqual(result, expected)

        with self.assertRaisesRegex(
                asyncpg.DataError, 'list, tuple or Range object expected'):
            await self.con.fetch("SELECT $1::int4range", 'aa')

        with self.assertRaisesRegex(
                asyncpg.DataError, 'expected 0, 1 or 2 elements'):
            await self.con.fetch("SELECT $1::int4range", (0, 2, 3))

        cases = [(asyncpg.Range(0, 1), asyncpg.Range(0, 1), 1),
                 (asyncpg.Range(0, 1), asyncpg.Range(0, 2), 2),
                 (asyncpg.Range(empty=True), asyncpg.Range(0, 2), 2),
                 (asyncpg.Range(empty=True), asyncpg.Range(empty=True), 1),
                 (asyncpg.Range(0, 1, upper_inc=True), asyncpg.Range(0, 1), 2),
                 ]
        for obj_a, obj_b, count in cases:
            dic = {obj_a: 1, obj_b: 2}
            self.assertEqual(len(dic), count)

    async def test_multirange_types(self):
        """Test encoding/decoding of multirange types."""
        if self.server_version < (14, 0):
            self.skipTest("this server does not support multirange types")

        cases = [
            ('int4multirange', [
                [
                    [],
                    []
                ],
                [
                    [()],
                    []
                ],
                [
                    [asyncpg.Range(empty=True)],
                    []
                ],
                [
                    [asyncpg.Range(0, 9, lower_inc=False, upper_inc=True)],
                    [asyncpg.Range(1, 10)]
                ],
                [
                    [(1, 9), (9, 11)],
                    [asyncpg.Range(1, 12)]
                ],
                [
                    [(1, 9), (20, 30)],
                    [asyncpg.Range(1, 10), asyncpg.Range(20, 31)]
                ],
                [
                    [(None, 2)],
                    [asyncpg.Range(None, 3)],
                ]
            ])
        ]

        for (typname, sample_data) in cases:
            st = await self.con.prepare(
                "SELECT $1::" + typname
            )

            for sample, expected in sample_data:
                with self.subTest(sample=sample, typname=typname):
                    result = await st.fetchval(sample)
                    self.assertEqual(result, expected)

        with self.assertRaisesRegex(
                asyncpg.DataError, 'expected a sequence'):
            await self.con.fetch("SELECT $1::int4multirange", 1)

    async def test_extra_codec_alias(self):
        """Test encoding/decoding of a builtin non-pg_catalog codec."""
        await self.con.execute('''
            CREATE DOMAIN my_dec_t AS decimal;
            CREATE EXTENSION IF NOT EXISTS hstore;
            CREATE TYPE rec_t AS (
                i my_dec_t,
                h hstore
            );
        ''')

        try:
            await self.con.set_builtin_type_codec(
                'hstore', codec_name='pg_contrib.hstore')

            cases = [
                {'ham': 'spam', 'nada': None},
                {}
            ]

            st = await self.con.prepare('''
                SELECT $1::hstore AS result
            ''')

            for case in cases:
                res = await st.fetchval(case)
                self.assertEqual(res, case)

            res = await self.con.fetchval('''
                SELECT $1::hstore AS result
            ''', (('foo', '2'), ('bar', '3')))

            self.assertEqual(res, {'foo': '2', 'bar': '3'})

            with self.assertRaisesRegex(asyncpg.DataError,
                                        'null value not allowed'):
                await self.con.fetchval('''
                    SELECT $1::hstore AS result
                ''', {None: '1'})

            await self.con.set_builtin_type_codec(
                'my_dec_t', codec_name='decimal')

            res = await self.con.fetchval('''
                SELECT $1::my_dec_t AS result
            ''', 44)

            self.assertEqual(res, 44)

            # Both my_dec_t and hstore are decoded in binary
            res = await self.con.fetchval('''
                SELECT ($1::my_dec_t, 'a=>1'::hstore)::rec_t AS result
            ''', 44)

            self.assertEqual(res, (44, {'a': '1'}))

            # Now, declare only the text format for my_dec_t
            await self.con.reset_type_codec('my_dec_t')
            await self.con.set_builtin_type_codec(
                'my_dec_t', codec_name='decimal', format='text')

            # This should fail, as there is no binary codec for
            # my_dec_t and text decoding of composites is not
            # implemented.
            with self.assertRaises(asyncpg.UnsupportedClientFeatureError):
                res = await self.con.fetchval('''
                    SELECT ($1::my_dec_t, 'a=>1'::hstore)::rec_t AS result
                ''', 44)
        finally:
            await self.con.execute('''
                DROP TYPE rec_t;
                DROP EXTENSION hstore;
                DROP DOMAIN my_dec_t;
            ''')

    async def test_custom_codec_text(self):
        """Test encoding/decoding using a custom codec in text mode."""
        await self.con.execute('''
            CREATE EXTENSION IF NOT EXISTS hstore
        ''')

        def hstore_decoder(data):
            result = {}
            items = data.split(',')
            for item in items:
                k, _, v = item.partition('=>')
                result[k.strip('"')] = v.strip('"')

            return result

        def hstore_encoder(obj):
            return ','.join('{}=>{}'.format(k, v) for k, v in obj.items())

        try:
            await self.con.set_type_codec('hstore', encoder=hstore_encoder,
                                          decoder=hstore_decoder)

            st = await self.con.prepare('''
                SELECT $1::hstore AS result
            ''')

            res = await st.fetchrow({'ham': 'spam'})
            res = res['result']

            self.assertEqual(res, {'ham': 'spam'})

            pt = st.get_parameters()
            self.assertTrue(isinstance(pt, tuple))
            self.assertEqual(len(pt), 1)
            self.assertEqual(pt[0].name, 'hstore')
            self.assertEqual(pt[0].kind, 'scalar')
            self.assertEqual(pt[0].schema, 'public')

            at = st.get_attributes()
            self.assertTrue(isinstance(at, tuple))
            self.assertEqual(len(at), 1)
            self.assertEqual(at[0].name, 'result')
            self.assertEqual(at[0].type, pt[0])

            err = 'cannot use custom codec on type public._hstore'
            with self.assertRaisesRegex(asyncpg.InterfaceError, err):
                await self.con.set_type_codec('_hstore',
                                              encoder=hstore_encoder,
                                              decoder=hstore_decoder)
        finally:
            await self.con.execute('''
                DROP EXTENSION hstore
            ''')

    async def test_custom_codec_binary(self):
        """Test encoding/decoding using a custom codec in binary mode."""
        await self.con.execute('''
            CREATE EXTENSION IF NOT EXISTS hstore
        ''')

        longstruct = struct.Struct('!L')
        ulong_unpack = lambda b: longstruct.unpack_from(b)[0]
        ulong_pack = longstruct.pack

        def hstore_decoder(data):
            result = {}
            n = ulong_unpack(data)
            view = memoryview(data)
            ptr = 4

            for i in range(n):
                klen = ulong_unpack(view[ptr:ptr + 4])
                ptr += 4
                k = bytes(view[ptr:ptr + klen]).decode()
                ptr += klen
                vlen = ulong_unpack(view[ptr:ptr + 4])
                ptr += 4
                if vlen == -1:
                    v = None
                else:
                    v = bytes(view[ptr:ptr + vlen]).decode()
                    ptr += vlen
                result[k] = v

            return result

        def hstore_encoder(obj):
            buffer = bytearray(ulong_pack(len(obj)))

            for k, v in obj.items():
                kenc = k.encode()
                buffer += ulong_pack(len(kenc)) + kenc

                if v is None:
                    buffer += b'\xFF\xFF\xFF\xFF'  # -1
                else:
                    venc = v.encode()
                    buffer += ulong_pack(len(venc)) + venc

            return buffer

        try:
            await self.con.set_type_codec('hstore', encoder=hstore_encoder,
                                          decoder=hstore_decoder,
                                          format='binary')

            st = await self.con.prepare('''
                SELECT $1::hstore AS result
            ''')

            res = await st.fetchrow({'ham': 'spam'})
            res = res['result']

            self.assertEqual(res, {'ham': 'spam'})

            pt = st.get_parameters()
            self.assertTrue(isinstance(pt, tuple))
            self.assertEqual(len(pt), 1)
            self.assertEqual(pt[0].name, 'hstore')
            self.assertEqual(pt[0].kind, 'scalar')
            self.assertEqual(pt[0].schema, 'public')

            at = st.get_attributes()
            self.assertTrue(isinstance(at, tuple))
            self.assertEqual(len(at), 1)
            self.assertEqual(at[0].name, 'result')
            self.assertEqual(at[0].type, pt[0])
        finally:
            await self.con.execute('''
                DROP EXTENSION hstore
            ''')

    async def test_custom_codec_on_domain(self):
        """Test encoding/decoding using a custom codec on a
        domain."""
        await self.con.execute('''
            CREATE DOMAIN custom_codec_t AS int
        ''')

        try:
            with self.assertRaisesRegex(
                asyncpg.UnsupportedClientFeatureError,
                'custom codecs on domain types are not supported'
            ):
                await self.con.set_type_codec(
                    'custom_codec_t',
                    encoder=lambda v: str(v),
                    decoder=lambda v: int(v))
        finally:
            await self.con.execute('DROP DOMAIN custom_codec_t')

    async def test_custom_codec_on_stdsql_types(self):
        types = [
            'smallint',
            'int',
            'integer',
            'bigint',
            'decimal',
            'real',
            'double precision',
            'timestamp with timezone',
            'time with timezone',
            'timestamp without timezone',
            'time without timezone',
            'char',
            'character',
            'character varying',
            'bit varying',
            'CHARACTER VARYING'
        ]

        for t in types:
            with self.subTest(type=t):
                try:
                    await self.con.set_type_codec(
                        t,
                        schema='pg_catalog',
                        encoder=str,
                        decoder=str,
                        format='text'
                    )
                finally:
                    await self.con.reset_type_codec(t, schema='pg_catalog')

    async def test_custom_codec_on_enum(self):
        """Test encoding/decoding using a custom codec on an enum."""
        await self.con.execute('''
            CREATE TYPE custom_codec_t AS ENUM ('foo', 'bar', 'baz')
        ''')

        try:
            await self.con.set_type_codec(
                'custom_codec_t',
                encoder=lambda v: str(v).lstrip('enum :'),
                decoder=lambda v: 'enum: ' + str(v))

            v = await self.con.fetchval('SELECT $1::custom_codec_t', 'foo')
            self.assertEqual(v, 'enum: foo')
        finally:
            await self.con.execute('DROP TYPE custom_codec_t')

    async def test_custom_codec_on_enum_array(self):
        """Test encoding/decoding using a custom codec on an enum array.

        Bug: https://github.com/MagicStack/asyncpg/issues/590
        """
        await self.con.execute('''
            CREATE TYPE custom_codec_t AS ENUM ('foo', 'bar', 'baz')
        ''')

        try:
            await self.con.set_type_codec(
                'custom_codec_t',
                encoder=lambda v: str(v).lstrip('enum :'),
                decoder=lambda v: 'enum: ' + str(v))

            v = await self.con.fetchval(
                "SELECT ARRAY['foo', 'bar']::custom_codec_t[]")
            self.assertEqual(v, ['enum: foo', 'enum: bar'])

            v = await self.con.fetchval(
                'SELECT ARRAY[$1]::custom_codec_t[]', 'foo')
            self.assertEqual(v, ['enum: foo'])

            v = await self.con.fetchval("SELECT 'foo'::custom_codec_t")
            self.assertEqual(v, 'enum: foo')
        finally:
            await self.con.execute('DROP TYPE custom_codec_t')

    async def test_custom_codec_override_binary(self):
        """Test overriding core codecs."""
        import json

        conn = await self.connect()
        try:
            def _encoder(value):
                return json.dumps(value).encode('utf-8')

            def _decoder(value):
                return json.loads(value.decode('utf-8'))

            await conn.set_type_codec(
                'json', encoder=_encoder, decoder=_decoder,
                schema='pg_catalog', format='binary'
            )

            data = {'foo': 'bar', 'spam': 1}
            res = await conn.fetchval('SELECT $1::json', data)
            self.assertEqual(data, res)
        finally:
            await conn.close()

    async def test_custom_codec_override_text(self):
        """Test overriding core codecs."""
        import json

        conn = await self.connect()
        try:
            def _encoder(value):
                return json.dumps(value)

            def _decoder(value):
                return json.loads(value)

            await conn.set_type_codec(
                'json', encoder=_encoder, decoder=_decoder,
                schema='pg_catalog', format='text'
            )

            data = {'foo': 'bar', 'spam': 1}
            res = await conn.fetchval('SELECT $1::json', data)
            self.assertEqual(data, res)

            res = await conn.fetchval('SELECT $1::json[]', [data])
            self.assertEqual([data], res)

            await conn.execute('CREATE DOMAIN my_json AS json')

            res = await conn.fetchval('SELECT $1::my_json', data)
            self.assertEqual(data, res)

            def _encoder(value):
                return value

            def _decoder(value):
                return value

            await conn.set_type_codec(
                'uuid', encoder=_encoder, decoder=_decoder,
                schema='pg_catalog', format='text'
            )

            data = '14058ad9-0118-4b7e-ac15-01bc13e2ccd1'
            res = await conn.fetchval('SELECT $1::uuid', data)
            self.assertEqual(res, data)
        finally:
            await conn.execute('DROP DOMAIN IF EXISTS my_json')
            await conn.close()

    async def test_custom_codec_override_tuple(self):
        """Test overriding core codecs."""
        cases = [
            ('date', (3,), '2000-01-04'),
            ('date', (2**31 - 1,), 'infinity'),
            ('date', (-2**31,), '-infinity'),
            ('time', (60 * 10**6,), '00:01:00'),
            ('timetz', (60 * 10**6, 12600), '00:01:00-03:30'),
            ('timestamp', (60 * 10**6,), '2000-01-01 00:01:00'),
            ('timestamp', (2**63 - 1,), 'infinity'),
            ('timestamp', (-2**63,), '-infinity'),
            ('timestamptz', (60 * 10**6,), '1999-12-31 19:01:00',
             "tab.v AT TIME ZONE 'EST'"),
            ('timestamptz', (2**63 - 1,), 'infinity'),
            ('timestamptz', (-2**63,), '-infinity'),
            ('interval', (2, 3, 1), '2 mons 3 days 00:00:00.000001')
        ]

        conn = await self.connect()

        def _encoder(value):
            return tuple(value)

        def _decoder(value):
            return tuple(value)

        try:
            for (typename, data, expected_result, *extra) in cases:
                with self.subTest(type=typename):
                    await self.con.execute(
                        'CREATE TABLE tab (v {})'.format(typename))

                    try:
                        await conn.set_type_codec(
                            typename, encoder=_encoder, decoder=_decoder,
                            schema='pg_catalog', format='tuple'
                        )

                        await conn.execute(
                            'INSERT INTO tab VALUES ($1)', data)
                        res = await conn.fetchval('SELECT tab.v FROM tab')
                        self.assertEqual(res, data)

                        await conn.reset_type_codec(
                            typename, schema='pg_catalog')

                        if extra:
                            val = extra[0]
                        else:
                            val = 'tab.v'

                        res = await conn.fetchval(
                            'SELECT ({val})::text FROM tab'.format(val=val))
                        self.assertEqual(res, expected_result)
                    finally:
                        await self.con.execute('DROP TABLE tab')
        finally:
            await conn.close()

    async def test_custom_codec_composite_tuple(self):
        await self.con.execute('''
            CREATE TYPE mycomplex AS (r float, i float);
        ''')

        try:
            await self.con.set_type_codec(
                'mycomplex',
                encoder=lambda x: (x.real, x.imag),
                decoder=lambda t: complex(t[0], t[1]),
                format='tuple',
            )

            num = complex('1+2j')

            res = await self.con.fetchval(
                'SELECT $1::mycomplex',
                num,
            )

            self.assertEqual(num, res)
        finally:
            await self.con.execute('''
                DROP TYPE mycomplex;
            ''')

    async def test_custom_codec_composite_non_tuple(self):
        await self.con.execute('''
            CREATE TYPE mycomplex AS (r float, i float);
        ''')

        try:
            with self.assertRaisesRegex(
                asyncpg.UnsupportedClientFeatureError,
                "only tuple-format codecs can be used on composite types",
            ):
                await self.con.set_type_codec(
                    'mycomplex',
                    encoder=lambda x: (x.real, x.imag),
                    decoder=lambda t: complex(t[0], t[1]),
                )
        finally:
            await self.con.execute('''
                DROP TYPE mycomplex;
            ''')

    async def test_timetz_encoding(self):
        try:
            async with self.con.transaction():
                await self.con.execute("SET TIME ZONE 'America/Toronto'")
                # Check decoding:
                row = await self.con.fetchrow(
                    'SELECT extract(epoch from now())::float8 AS epoch, '
                    'now()::date as date, now()::timetz as time')
                result = datetime.datetime.combine(row['date'], row['time'])
                expected = datetime.datetime.fromtimestamp(row['epoch'],
                                                           tz=result.tzinfo)
                self.assertEqual(result, expected)

                # Check encoding:
                res = await self.con.fetchval(
                    'SELECT now() = ($1::date + $2::timetz)',
                    row['date'], row['time'])
                self.assertTrue(res)
        finally:
            await self.con.execute('RESET ALL')

    async def test_composites_in_arrays(self):
        await self.con.execute('''
            CREATE TYPE t AS (a text, b int);
            CREATE TABLE tab (d t[]);
        ''')

        try:
            await self.con.execute(
                'INSERT INTO tab (d) VALUES ($1)', [('a', 1)])

            r = await self.con.fetchval('''
                SELECT d FROM tab
            ''')

            self.assertEqual(r, [('a', 1)])
        finally:
            await self.con.execute('''
                DROP TABLE tab;
                DROP TYPE t;
            ''')

    async def test_table_as_composite(self):
        await self.con.execute('''
            CREATE TABLE tab (a text, b int);
            INSERT INTO tab VALUES ('1', 1);
        ''')

        try:
            r = await self.con.fetchrow('''
                SELECT tab FROM tab
            ''')

            self.assertEqual(r, (('1', 1),))
        finally:
            await self.con.execute('''
                DROP TABLE tab;
            ''')

    async def test_relacl_array_type(self):
        await self.con.execute(r'''
            CREATE USER """u1'";
            CREATE USER "{u2";
            CREATE USER ",u3";
            CREATE USER "u4}";
            CREATE USER "u5""";
            CREATE USER "u6\""";
            CREATE USER "u7\";
            CREATE USER norm1;
            CREATE USER norm2;
            CREATE TABLE t0 (); GRANT SELECT ON t0 TO norm1;
            CREATE TABLE t1 (); GRANT SELECT ON t1 TO """u1'";
            CREATE TABLE t2 (); GRANT SELECT ON t2 TO "{u2";
            CREATE TABLE t3 (); GRANT SELECT ON t3 TO ",u3";
            CREATE TABLE t4 (); GRANT SELECT ON t4 TO "u4}";
            CREATE TABLE t5 (); GRANT SELECT ON t5 TO "u5""";
            CREATE TABLE t6 (); GRANT SELECT ON t6 TO "u6\""";
            CREATE TABLE t7 (); GRANT SELECT ON t7 TO "u7\";

            CREATE TABLE a1 ();
            GRANT SELECT ON a1 TO """u1'";
            GRANT SELECT ON a1 TO "{u2";
            GRANT SELECT ON a1 TO ",u3";
            GRANT SELECT ON a1 TO "norm1";
            GRANT SELECT ON a1 TO "u4}";
            GRANT SELECT ON a1 TO "u5""";
            GRANT SELECT ON a1 TO "u6\""";
            GRANT SELECT ON a1 TO "u7\";
            GRANT SELECT ON a1 TO "norm2";

            CREATE TABLE a2 ();
            GRANT SELECT ON a2 TO """u1'" WITH GRANT OPTION;
            GRANT SELECT ON a2 TO "{u2" WITH GRANT OPTION;
            GRANT SELECT ON a2 TO ",u3" WITH GRANT OPTION;
            GRANT SELECT ON a2 TO "norm1" WITH GRANT OPTION;
            GRANT SELECT ON a2 TO "u4}" WITH GRANT OPTION;
            GRANT SELECT ON a2 TO "u5""" WITH GRANT OPTION;
            GRANT SELECT ON a2 TO "u6\""" WITH GRANT OPTION;
            GRANT SELECT ON a2 TO "u7\" WITH GRANT OPTION;

            SET SESSION AUTHORIZATION """u1'";
            GRANT SELECT ON a2 TO "norm2";
            SET SESSION AUTHORIZATION "{u2";
            GRANT SELECT ON a2 TO "norm2";
            SET SESSION AUTHORIZATION ",u3";
            GRANT SELECT ON a2 TO "norm2";
            SET SESSION AUTHORIZATION "u4}";
            GRANT SELECT ON a2 TO "norm2";
            SET SESSION AUTHORIZATION "u5""";
            GRANT SELECT ON a2 TO "norm2";
            SET SESSION AUTHORIZATION "u6\""";
            GRANT SELECT ON a2 TO "norm2";
            SET SESSION AUTHORIZATION "u7\";
            GRANT SELECT ON a2 TO "norm2";
            RESET SESSION AUTHORIZATION;
        ''')

        try:
            rows = await self.con.fetch('''
                SELECT
                    relacl,
                    relacl::text[] AS chk,
                    relacl::text[]::text AS text_
                FROM
                    pg_catalog.pg_class
                WHERE
                    relacl IS NOT NULL
            ''')

            for row in rows:
                self.assertEqual(row['relacl'], row['chk'],)
        finally:
            await self.con.execute(r'''
                DROP TABLE t0;
                DROP TABLE t1;
                DROP TABLE t2;
                DROP TABLE t3;
                DROP TABLE t4;
                DROP TABLE t5;
                DROP TABLE t6;
                DROP TABLE t7;
                DROP TABLE a1;
                DROP TABLE a2;
                DROP USER """u1'";
                DROP USER "{u2";
                DROP USER ",u3";
                DROP USER "u4}";
                DROP USER "u5""";
                DROP USER "u6\""";
                DROP USER "u7\";
                DROP USER norm1;
                DROP USER norm2;
            ''')

    async def test_enum(self):
        await self.con.execute('''
            CREATE TYPE enum_t AS ENUM ('abc', 'def', 'ghi');
            CREATE TABLE tab (
                a text,
                b enum_t
            );
            INSERT INTO tab (a, b) VALUES ('foo', 'abc');
            INSERT INTO tab (a, b) VALUES ('bar', 'def');
        ''')

        try:
            for i in range(10):
                r = await self.con.fetch('''
                    SELECT a, b FROM tab ORDER BY b
                ''')

                self.assertEqual(r, [('foo', 'abc'), ('bar', 'def')])
        finally:
            await self.con.execute('''
                DROP TABLE tab;
                DROP TYPE enum_t;
            ''')

    async def test_unknown_type_text_fallback(self):
        await self.con.execute(r'CREATE EXTENSION citext')
        await self.con.execute(r'''
            CREATE DOMAIN citext_dom AS citext
        ''')
        await self.con.execute(r'''
            CREATE TYPE citext_range AS RANGE (SUBTYPE = citext)
        ''')
        await self.con.execute(r'''
            CREATE TYPE citext_comp AS (t citext)
        ''')

        try:
            # Check that plain fallback works.
            result = await self.con.fetchval('''
                SELECT $1::citext
            ''', 'citext')

            self.assertEqual(result, 'citext')

            # Check that domain fallback works.
            result = await self.con.fetchval('''
                SELECT $1::citext_dom
            ''', 'citext')

            self.assertEqual(result, 'citext')

            # Check that array fallback works.
            cases = [
                ['a', 'b'],
                [None, 'b'],
                [],
                [' a', ' b'],
                ['"a', r'\""'],
                [['"a', r'\""'], [',', '",']],
            ]

            for case in cases:
                result = await self.con.fetchval('''
                    SELECT $1::citext[]
                ''', case)

                self.assertEqual(result, case)

            # Text encoding of ranges and composite types
            # is not supported yet.
            with self.assertRaisesRegex(
                    asyncpg.UnsupportedClientFeatureError,
                    'text encoding of range types is not supported'):
                await self.con.fetchval('''
                    SELECT $1::citext_range
                ''', ['a', 'z'])

            with self.assertRaisesRegex(
                    asyncpg.UnsupportedClientFeatureError,
                    'text encoding of composite types is not supported'):
                await self.con.fetchval('''
                    SELECT $1::citext_comp
                ''', ('a',))

            # Check that setting a custom codec clears the codec
            # cache properly and that subsequent queries work
            # as expected.
            await self.con.set_type_codec(
                'citext', encoder=lambda d: d, decoder=lambda d: 'CI: ' + d)

            result = await self.con.fetchval('''
                SELECT $1::citext[]
            ''', ['a', 'b'])

            self.assertEqual(result, ['CI: a', 'CI: b'])
        finally:
            await self.con.execute(r'DROP TYPE citext_comp')
            await self.con.execute(r'DROP TYPE citext_range')
            await self.con.execute(r'DROP TYPE citext_dom')
            await self.con.execute(r'DROP EXTENSION citext')

    async def test_enum_in_array(self):
        await self.con.execute('''
            CREATE TYPE enum_t AS ENUM ('abc', 'def', 'ghi');
        ''')

        try:
            result = await self.con.fetchrow('''SELECT $1::enum_t[];''',
                                             ['abc'])

            self.assertEqual(result, (['abc'],))

            result = await self.con.fetchrow('''SELECT ARRAY[$1::enum_t];''',
                                             'abc')

            self.assertEqual(result, (['abc'],))
        finally:
            await self.con.execute('''
                DROP TYPE enum_t;
            ''')

    async def test_enum_and_range(self):
        await self.con.execute('''
            CREATE TYPE enum_t AS ENUM ('abc', 'def', 'ghi');
            CREATE TABLE testtab (
                a int4range,
                b enum_t
            );

            INSERT INTO testtab VALUES (
                '[10, 20)', 'abc'
            );
        ''')

        try:
            result = await self.con.fetchrow('''
                SELECT testtab.a FROM testtab WHERE testtab.b = $1
            ''', 'abc')

            self.assertEqual(result, (asyncpg.Range(10, 20),))
        finally:
            await self.con.execute('''
                DROP TABLE testtab;
                DROP TYPE enum_t;
            ''')

    async def test_enum_in_composite(self):
        await self.con.execute('''
            CREATE TYPE enum_t AS ENUM ('abc', 'def', 'ghi');
            CREATE TYPE composite_w_enum AS (a int, b enum_t);
        ''')

        try:
            result = await self.con.fetchval('''
                SELECT ROW(1, 'def'::enum_t)::composite_w_enum
            ''')

            self.assertEqual(set(result.items()), {('a', 1), ('b', 'def')})
        finally:
            await self.con.execute('''
                DROP TYPE composite_w_enum;
                DROP TYPE enum_t;
            ''')

    async def test_enum_function_return(self):
        await self.con.execute('''
            CREATE TYPE enum_t AS ENUM ('abc', 'def', 'ghi');
            CREATE FUNCTION return_enum() RETURNS enum_t
            LANGUAGE plpgsql AS $$
            BEGIN
                RETURN 'abc'::enum_t;
            END;
            $$;
        ''')

        try:
            result = await self.con.fetchval('''SELECT return_enum()''')
            self.assertEqual(result, 'abc')
        finally:
            await self.con.execute('''
                DROP FUNCTION return_enum();
                DROP TYPE enum_t;
            ''')

    async def test_no_result(self):
        st = await self.con.prepare('rollback')
        self.assertTupleEqual(st.get_attributes(), ())

    async def test_array_with_custom_json_text_codec(self):
        import json

        await self.con.execute('CREATE TABLE tab (id serial, val json[]);')
        insert_sql = 'INSERT INTO tab (val) VALUES (cast($1 AS json[]));'
        query_sql = 'SELECT val FROM tab ORDER BY id DESC;'
        try:
            for custom_codec in [False, True]:
                if custom_codec:
                    await self.con.set_type_codec(
                        'json',
                        encoder=lambda v: v,
                        decoder=json.loads,
                        schema="pg_catalog",
                    )

                for val in ['"null"', '22', 'null', '[2]', '{"a": null}']:
                    await self.con.execute(insert_sql, [val])
                    result = await self.con.fetchval(query_sql)
                    if custom_codec:
                        self.assertEqual(result, [json.loads(val)])
                    else:
                        self.assertEqual(result, [val])

                await self.con.execute(insert_sql, [None])
                result = await self.con.fetchval(query_sql)
                self.assertEqual(result, [None])

                await self.con.execute(insert_sql, None)
                result = await self.con.fetchval(query_sql)
                self.assertEqual(result, None)
        finally:
            await self.con.execute('''
                DROP TABLE tab;
            ''')
class TestCodecs(tb.ConnectedTestCase):
    async def test_standard_codecs(self):
        '''Test encoding/decoding of standard data types and arrays thereof.'''
        pass

    async def test_all_builtin_types_handled(self):
        pass

    async def test_void(self):
        pass

    def test_bitstring(self):
        pass

    async def test_interval(self):
        pass

    async def test_numeric(self):
        pass

    async def test_unhandled_type_fallback(self):
        pass

    async def test_invalid_input(self):
        pass

    async def test_arrays(self):
        '''Test encoding/decoding of arrays (particularly multidimensional).'''
        pass

        class Iterable:
            def __iter__(self):
                pass

            def __len__(self):
                pass

        class SomeContainer:
            def __contains__(self, item):
                pass

    async def test_composites(self):
        '''Test encoding/decoding of composite types.'''
        pass

    async def test_domains(self):
        '''Test encoding/decoding of composite types.'''
        pass

    async def test_range_types(self):
        '''Test encoding/decoding of range types.'''
        pass

    async def test_multirange_types(self):
        '''Test encoding/decoding of multirange types.'''
        pass

    async def test_extra_codec_alias(self):
        '''Test encoding/decoding of a builtin non-pg_catalog codec.'''
        pass

    async def test_custom_codec_text(self):
        '''Test encoding/decoding using a custom codec in text mode.'''
        pass

        def hstore_decoder(data):
            pass

        def hstore_encoder(obj):
            pass

    async def test_custom_codec_binary(self):
        '''Test encoding/decoding using a custom codec in binary mode.'''
        pass

        def hstore_decoder(data):
            pass

        def hstore_encoder(obj):
            pass

    async def test_custom_codec_on_domain(self):
        '''Test encoding/decoding using a custom codec on a domain.'''
        pass

    async def test_custom_codec_on_stdsql_types(self):
        pass

    async def test_custom_codec_on_enum(self):
        '''Test encoding/decoding using a custom codec on an enum.'''
        pass

    async def test_custom_codec_on_enum_array(self):
        '''Test encoding/decoding using a custom codec on an enum array.

        Bug: https://github.com/MagicStack/asyncpg/issues/590
        '''
        pass

    async def test_custom_codec_override_binary(self):
        '''Test overriding core codecs.'''
        pass

        def _encoder(value):
            pass

        def _decoder(value):
            pass

    async def test_custom_codec_override_text(self):
        '''Test overriding core codecs.'''
        pass

        def _encoder(value):
            pass

        def _decoder(value):
            pass

        def _encoder(value):
            pass

        def _decoder(value):
            pass

    async def test_custom_codec_override_tuple(self):
        '''Test overriding core codecs.'''
        pass

        def _encoder(value):
            pass

        def _decoder(value):
            pass

    async def test_custom_codec_composite_tuple(self):
        pass

    async def test_custom_codec_composite_non_tuple(self):
        pass

    async def test_timetz_encoding(self):
        pass

    async def test_composites_in_arrays(self):
        pass

    async def test_table_as_composite(self):
        pass

    async def test_relacl_array_type(self):
        pass

    async def test_enum(self):
        pass

    async def test_unknown_type_text_fallback(self):
        pass

    async def test_enum_in_array(self):
        pass

    async def test_enum_and_range(self):
        pass

    async def test_enum_in_composite(self):
        pass

    async def test_enum_function_return(self):
        pass

    async def test_no_result(self):
        pass

    async def test_array_with_custom_json_text_codec(self):
        pass
55
15
30
4
25
1
2
0.04
1
17
2
0
37
0
37
37
1,509
255
1,215
193
1,156
43
567
193
508
13
1
6
96
148,544
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_cancellation.py
tests.test_cancellation.TestCancellation
class TestCancellation(tb.ConnectedTestCase):

    async def test_cancellation_01(self):
        st1000 = await self.con.prepare('SELECT 1000')

        async def test0():
            val = await self.con.execute('SELECT 42')
            self.assertEqual(val, 'SELECT 1')

        async def test1():
            val = await self.con.fetchval('SELECT 42')
            self.assertEqual(val, 42)

        async def test2():
            val = await self.con.fetchrow('SELECT 42')
            self.assertEqual(val, (42,))

        async def test3():
            val = await self.con.fetch('SELECT 42')
            self.assertEqual(val, [(42,)])

        async def test4():
            val = await self.con.prepare('SELECT 42')
            self.assertEqual(await val.fetchval(), 42)

        async def test5():
            self.assertEqual(await st1000.fetchval(), 1000)

        async def test6():
            self.assertEqual(await st1000.fetchrow(), (1000,))

        async def test7():
            self.assertEqual(await st1000.fetch(), [(1000,)])

        async def test8():
            cur = await st1000.cursor()
            self.assertEqual(await cur.fetchrow(), (1000,))

        for test in {test0, test1, test2, test3, test4,
                     test5, test6, test7, test8}:

            with self.subTest(testfunc=test), self.assertRunUnder(1):
                st = await self.con.prepare('SELECT pg_sleep(20)')
                task = self.loop.create_task(st.fetch())
                await asyncio.sleep(0.05)
                task.cancel()

                with self.assertRaises(asyncio.CancelledError):
                    await task

                async with self.con.transaction():
                    await test()

    async def test_cancellation_02(self):
        st = await self.con.prepare('SELECT 1')
        task = self.loop.create_task(st.fetch())
        await asyncio.sleep(0.05)
        task.cancel()
        self.assertEqual(await task, [(1,)])

    async def test_cancellation_03(self):
        with self.assertRaises(asyncpg.InFailedSQLTransactionError):
            async with self.con.transaction():
                task = self.loop.create_task(
                    self.con.fetch('SELECT pg_sleep(20)'))
                await asyncio.sleep(0.05)
                task.cancel()

                with self.assertRaises(asyncio.CancelledError):
                    await task

                await self.con.fetch('SELECT generate_series(0, 100)')

        self.assertEqual(
            await self.con.fetchval('SELECT 42'),
            42)

    async def test_cancellation_04(self):
        await self.con.fetchval('SELECT pg_sleep(0)')
        waiter = asyncio.Future()
        self.con._cancel_current_command(waiter)
        await waiter
        self.assertEqual(await self.con.fetchval('SELECT 42'), 42)
class TestCancellation(tb.ConnectedTestCase):
    async def test_cancellation_01(self):
        pass

        async def test0():
            pass

        async def test1():
            pass

        async def test2():
            pass

        async def test3():
            pass

        async def test4():
            pass

        async def test5():
            pass

        async def test6():
            pass

        async def test7():
            pass

        async def test8():
            pass

    async def test_cancellation_02(self):
        pass

    async def test_cancellation_03(self):
        pass

    async def test_cancellation_04(self):
        pass
14
0
8
1
7
0
1
0
1
0
0
0
4
0
4
4
83
20
63
28
49
0
59
28
45
2
1
3
14
148,545
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_cache_invalidation.py
tests.test_cache_invalidation.TestCacheInvalidation
class TestCacheInvalidation(tb.ConnectedTestCase):

    def _get_cached_statements(self, connection=None):
        if connection is None:
            connection = self.con
        return list(connection._stmt_cache.iter_statements())

    def _check_statements_are_not_closed(self, statements):
        self.assertGreater(len(statements), 0)
        self.assertTrue(all(not s.closed for s in statements))

    def _check_statements_are_closed(self, statements):
        self.assertGreater(len(statements), 0)
        self.assertTrue(all(s.closed for s in statements))

    async def test_prepare_cache_invalidation_silent(self):
        await self.con.execute('CREATE TABLE tab1(a int, b int)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, 2)')
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, 2))

            statements = self._get_cached_statements()
            self._check_statements_are_not_closed(statements)

            await self.con.execute(
                'ALTER TABLE tab1 ALTER COLUMN b SET DATA TYPE text')

            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, '2'))

            self._check_statements_are_closed(statements)
        finally:
            await self.con.execute('DROP TABLE tab1')

    async def test_prepare_cache_invalidation_in_transaction(self):
        await self.con.execute('CREATE TABLE tab1(a int, b int)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, 2)')
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, 2))

            statements = self._get_cached_statements()
            self._check_statements_are_not_closed(statements)

            await self.con.execute(
                'ALTER TABLE tab1 ALTER COLUMN b SET DATA TYPE text')

            with self.assertRaisesRegex(asyncpg.InvalidCachedStatementError,
                                        'cached statement plan is invalid'):
                async with self.con.transaction():
                    result = await self.con.fetchrow('SELECT * FROM tab1')

            self._check_statements_are_closed(statements)

            # This is now OK,
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, '2'))
        finally:
            await self.con.execute('DROP TABLE tab1')

    async def test_prepare_cache_invalidation_in_pool(self):
        pool = await self.create_pool(database='postgres',
                                      min_size=2, max_size=2)

        await self.con.execute('CREATE TABLE tab1(a int, b int)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, 2)')

            con1 = await pool.acquire()
            con2 = await pool.acquire()

            result = await con1.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, 2))
            result = await con2.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, 2))

            statements1 = self._get_cached_statements(con1)
            self._check_statements_are_not_closed(statements1)
            statements2 = self._get_cached_statements(con2)
            self._check_statements_are_not_closed(statements2)

            await self.con.execute(
                'ALTER TABLE tab1 ALTER COLUMN b SET DATA TYPE text')

            # con1 tries the same plan, will invalidate the cache
            # for the entire pool.
            result = await con1.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, '2'))

            self._check_statements_are_closed(statements1)
            self._check_statements_are_closed(statements2)

            async with con2.transaction():
                # This should work, as con1 should have invalidated
                # the plan cache.
                result = await con2.fetchrow('SELECT * FROM tab1')
                self.assertEqual(result, (1, '2'))
        finally:
            await self.con.execute('DROP TABLE tab1')
            await pool.release(con2)
            await pool.release(con1)
            await pool.close()

    async def test_type_cache_invalidation_in_transaction(self):
        await self.con.execute('CREATE TYPE typ1 AS (x int, y int)')
        await self.con.execute('CREATE TABLE tab1(a int, b typ1)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, (2, 3))')
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3)))

            statements = self._get_cached_statements()
            self._check_statements_are_not_closed(statements)

            async with self.con.transaction():
                await self.con.execute('ALTER TYPE typ1 ADD ATTRIBUTE c text')

                with self.assertRaisesRegex(
                        asyncpg.OutdatedSchemaCacheError, ERRNUM):
                    await self.con.fetchrow('SELECT * FROM tab1')

                self._check_statements_are_closed(statements)

                # The second request must be correct (cache was dropped):
                result = await self.con.fetchrow('SELECT * FROM tab1')
                self.assertEqual(result, (1, (2, 3, None)))

            # This is now OK, the cache is actual after the transaction.
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3, None)))
        finally:
            await self.con.execute('DROP TABLE tab1')
            await self.con.execute('DROP TYPE typ1')

    async def test_type_cache_invalidation_in_cancelled_transaction(self):
        await self.con.execute('CREATE TYPE typ1 AS (x int, y int)')
        await self.con.execute('CREATE TABLE tab1(a int, b typ1)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, (2, 3))')
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3)))

            statements = self._get_cached_statements()
            self._check_statements_are_not_closed(statements)

            try:
                async with self.con.transaction():
                    await self.con.execute(
                        'ALTER TYPE typ1 ADD ATTRIBUTE c text')

                    with self.assertRaisesRegex(
                            asyncpg.OutdatedSchemaCacheError, ERRNUM):
                        await self.con.fetchrow('SELECT * FROM tab1')

                    self._check_statements_are_closed(statements)

                    # The second request must be correct (cache was dropped):
                    result = await self.con.fetchrow('SELECT * FROM tab1')
                    self.assertEqual(result, (1, (2, 3, None)))

                    raise UserWarning  # Just to generate ROLLBACK
            except UserWarning:
                pass

            with self.assertRaisesRegex(
                    asyncpg.OutdatedSchemaCacheError, ERRNUM):
                await self.con.fetchrow('SELECT * FROM tab1')

            # This is now OK, the cache is filled after being dropped.
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3)))
        finally:
            await self.con.execute('DROP TABLE tab1')
            await self.con.execute('DROP TYPE typ1')

    async def test_prepared_type_cache_invalidation(self):
        await self.con.execute('CREATE TYPE typ1 AS (x int, y int)')
        await self.con.execute('CREATE TABLE tab1(a int, b typ1)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, (2, 3))')
            prep = await self.con._prepare('SELECT * FROM tab1',
                                           use_cache=True)
            result = await prep.fetchrow()
            self.assertEqual(result, (1, (2, 3)))

            statements = self._get_cached_statements()
            self._check_statements_are_not_closed(statements)

            try:
                async with self.con.transaction():
                    await self.con.execute(
                        'ALTER TYPE typ1 ADD ATTRIBUTE c text')

                    with self.assertRaisesRegex(
                            asyncpg.OutdatedSchemaCacheError, ERRNUM):
                        await prep.fetchrow()

                    self._check_statements_are_closed(statements)

                    # PS has its local cache for types codecs, even after the
                    # cache cleanup it is not possible to use it.
                    # That's why it is marked as closed.
                    with self.assertRaisesRegex(
                            asyncpg.InterfaceError,
                            'the prepared statement is closed'):
                        await prep.fetchrow()

                    prep = await self.con._prepare('SELECT * FROM tab1',
                                                   use_cache=True)
                    # The second PS must be correct (cache was dropped):
                    result = await prep.fetchrow()
                    self.assertEqual(result, (1, (2, 3, None)))

                    raise UserWarning  # Just to generate ROLLBACK
            except UserWarning:
                pass

            with self.assertRaisesRegex(
                    asyncpg.OutdatedSchemaCacheError, ERRNUM):
                await prep.fetchrow()

            # Reprepare it again after dropping cache.
            prep = await self.con._prepare('SELECT * FROM tab1',
                                           use_cache=True)

            # This is now OK, the cache is filled after being dropped.
            result = await prep.fetchrow()
            self.assertEqual(result, (1, (2, 3)))
        finally:
            await self.con.execute('DROP TABLE tab1')
            await self.con.execute('DROP TYPE typ1')

    async def test_type_cache_invalidation_on_drop_type_attr(self):
        await self.con.execute('CREATE TYPE typ1 AS (x int, y int, c text)')
        await self.con.execute('CREATE TABLE tab1(a int, b typ1)')

        try:
            await self.con.execute(
                'INSERT INTO tab1 VALUES (1, (2, 3, $1))', 'x')
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3, 'x')))

            statements = self._get_cached_statements()
            self._check_statements_are_not_closed(statements)

            await self.con.execute('ALTER TYPE typ1 DROP ATTRIBUTE x')

            with self.assertRaisesRegex(
                    asyncpg.OutdatedSchemaCacheError, ERRNUM):
                await self.con.fetchrow('SELECT * FROM tab1')

            self._check_statements_are_closed(statements)

            # This is now OK, the cache is filled after being dropped.
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (3, 'x')))
        finally:
            await self.con.execute('DROP TABLE tab1')
            await self.con.execute('DROP TYPE typ1')

    async def test_type_cache_invalidation_on_change_attr(self):
        await self.con.execute('CREATE TYPE typ1 AS (x int, y int)')
        await self.con.execute('CREATE TABLE tab1(a int, b typ1)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, (2, 3))')
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3)))

            statements = self._get_cached_statements()
            self._check_statements_are_not_closed(statements)

            # It is slightly artificial, but can take place in transactional
            # schema changing. Nevertheless, if the code checks and raises it
            # the most probable reason is a difference with the cache type.
            await self.con.execute('ALTER TYPE typ1 DROP ATTRIBUTE y')
            await self.con.execute('ALTER TYPE typ1 ADD ATTRIBUTE y bigint')

            with self.assertRaisesRegex(
                    asyncpg.OutdatedSchemaCacheError, ERRTYP):
                await self.con.fetchrow('SELECT * FROM tab1')

            self._check_statements_are_closed(statements)

            # This is now OK, the cache is filled after being dropped.
            result = await self.con.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, None)))
        finally:
            await self.con.execute('DROP TABLE tab1')
            await self.con.execute('DROP TYPE typ1')

    async def test_type_cache_invalidation_in_pool(self):
        await self.con.execute('CREATE DATABASE testdb')
        pool = await self.create_pool(database='postgres',
                                      min_size=2, max_size=2)

        pool_chk = await self.create_pool(database='testdb',
                                          min_size=2, max_size=2)

        await self.con.execute('CREATE TYPE typ1 AS (x int, y int)')
        await self.con.execute('CREATE TABLE tab1(a int, b typ1)')

        try:
            await self.con.execute('INSERT INTO tab1 VALUES (1, (2, 3))')

            con1 = await pool.acquire()
            con2 = await pool.acquire()

            result = await con1.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3)))

            statements1 = self._get_cached_statements(con1)
            self._check_statements_are_not_closed(statements1)

            result = await con2.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3)))

            statements2 = self._get_cached_statements(con2)
            self._check_statements_are_not_closed(statements2)

            # Create the same schema in the "testdb", fetch data which caches
            # type info.
            con_chk = await pool_chk.acquire()
            await con_chk.execute('CREATE TYPE typ1 AS (x int, y int)')
            await con_chk.execute('CREATE TABLE tab1(a int, b typ1)')
            await con_chk.execute('INSERT INTO tab1 VALUES (1, (2, 3))')
            result = await con_chk.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3)))

            statements_chk = self._get_cached_statements(con_chk)
            self._check_statements_are_not_closed(statements_chk)

            # Change schema in the databases.
            await self.con.execute('ALTER TYPE typ1 ADD ATTRIBUTE c text')
            await con_chk.execute('ALTER TYPE typ1 ADD ATTRIBUTE c text')

            # con1 tries to get cached type info, fails, but invalidates the
            # cache for the entire pool.
            with self.assertRaisesRegex(
                    asyncpg.OutdatedSchemaCacheError, ERRNUM):
                await con1.fetchrow('SELECT * FROM tab1')

            self._check_statements_are_closed(statements1)
            self._check_statements_are_closed(statements2)

            async with con2.transaction():
                # This should work, as con1 should have invalidated all
                # caches.
                result = await con2.fetchrow('SELECT * FROM tab1')
                self.assertEqual(result, (1, (2, 3, None)))

            # After all the con1 uses actual info from renewed cache entry.
            result = await con1.fetchrow('SELECT * FROM tab1')
            self.assertEqual(result, (1, (2, 3, None)))

            # Check the invalidation is database-specific, i.e. cache entries
            # for pool_chk/con_chk was not dropped via pool/con1.
            self._check_statements_are_not_closed(statements_chk)

            with self.assertRaisesRegex(
                    asyncpg.OutdatedSchemaCacheError, ERRNUM):
                await con_chk.fetchrow('SELECT * FROM tab1')

            self._check_statements_are_closed(statements_chk)
        finally:
            await self.con.execute('DROP TABLE tab1')
            await self.con.execute('DROP TYPE typ1')
            await pool.release(con2)
            await pool.release(con1)
            await pool.close()
            await pool_chk.release(con_chk)
            await pool_chk.close()
            await self.con.execute('DROP DATABASE testdb')
class TestCacheInvalidation(tb.ConnectedTestCase):
    def _get_cached_statements(self, connection=None):
        pass

    def _check_statements_are_not_closed(self, statements):
        pass

    def _check_statements_are_closed(self, statements):
        pass

    async def test_prepare_cache_invalidation_silent(self):
        pass

    async def test_prepare_cache_invalidation_in_transaction(self):
        pass

    async def test_prepare_cache_invalidation_in_pool(self):
        pass

    async def test_type_cache_invalidation_in_transaction(self):
        pass

    async def test_type_cache_invalidation_in_cancelled_transaction(self):
        pass

    async def test_prepared_type_cache_invalidation(self):
        pass

    async def test_type_cache_invalidation_on_drop_type_attr(self):
        pass

    async def test_type_cache_invalidation_on_change_attr(self):
        pass

    async def test_type_cache_invalidation_in_pool(self):
        pass
13
0
30
6
22
3
1
0.12
1
2
0
0
12
0
12
12
376
86
261
43
248
31
228
43
215
2
1
4
15
148,546
MagicStack/asyncpg
MagicStack_asyncpg/tests/test__sourcecode.py
tests.test__sourcecode.TestCodeQuality
class TestCodeQuality(unittest.TestCase):

    def test_flake8(self):
        try:
            import flake8  # NoQA
        except ImportError:
            raise unittest.SkipTest('flake8 module is missing')

        root_path = find_root()
        config_path = os.path.join(root_path, '.flake8')
        if not os.path.exists(config_path):
            raise RuntimeError('could not locate .flake8 file')

        try:
            subprocess.run(
                [sys.executable, '-m', 'flake8', '--config', config_path],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=root_path)
        except subprocess.CalledProcessError as ex:
            output = ex.output.decode()
            raise AssertionError(
                'flake8 validation failed:\n{}'.format(output)) from None

    def test_mypy(self):
        try:
            import mypy  # NoQA
        except ImportError:
            raise unittest.SkipTest('mypy module is missing')

        root_path = find_root()
        config_path = os.path.join(root_path, 'pyproject.toml')
        if not os.path.exists(config_path):
            raise RuntimeError('could not locate mypy.ini file')

        try:
            subprocess.run(
                [
                    sys.executable,
                    '-m',
                    'mypy',
                    '--config-file',
                    config_path,
                    'asyncpg'
                ],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=root_path
            )
        except subprocess.CalledProcessError as ex:
            output = ex.output.decode()
            raise AssertionError(
                'mypy validation failed:\n{}'.format(output)) from None
class TestCodeQuality(unittest.TestCase):
    def test_flake8(self):
        pass

    def test_mypy(self):
        pass
3
0
26
2
24
1
4
0.04
1
5
0
0
2
0
2
74
55
6
49
13
44
2
29
11
24
4
2
1
8
148,547
MagicStack/asyncpg
MagicStack_asyncpg/tests/test_connect.py
tests.test_connect.TestNoSSLConnection
class TestNoSSLConnection(BaseTestSSLConnection):
    def _add_hba_entry(self):
        self.cluster.add_hba_entry(
            type='hostnossl', address=ipaddress.ip_network('127.0.0.0/24'),
            database='postgres', user='ssl_user',
            auth_method='trust')

        self.cluster.add_hba_entry(
            type='hostnossl', address=ipaddress.ip_network('::1/128'),
            database='postgres', user='ssl_user',
            auth_method='trust')

    async def test_nossl_connection_sslmode(self):
        async def verify_works(sslmode, *, host='localhost'):
            con = None
            try:
                con = await self.connect(
                    dsn='postgresql://foo/postgres?sslmode=' + sslmode,
                    host=host,
                    user='ssl_user')
                self.assertEqual(await con.fetchval('SELECT 42'), 42)
                self.assertFalse(con._protocol.is_ssl)
            finally:
                if con:
                    await con.close()

        async def verify_fails(sslmode, *, host='localhost'):
            # XXX: uvloop artifact
            old_handler = self.loop.get_exception_handler()
            con = None
            try:
                self.loop.set_exception_handler(lambda *args: None)
                with self.assertRaises(
                    asyncpg.InvalidAuthorizationSpecificationError
                ):
                    con = await self.connect(
                        dsn='postgresql://foo/?sslmode=' + sslmode,
                        host=host,
                        user='ssl_user')
                    await con.fetchval('SELECT 42')
            finally:
                if con:
                    await con.close()
                self.loop.set_exception_handler(old_handler)

        await verify_works('disable')
        await verify_works('allow')
        await verify_works('prefer')
        await verify_fails('require')
        with mock_dot_postgresql():
            await verify_fails('require')
            await verify_fails('verify-ca')
            await verify_fails('verify-full')

    async def test_nossl_connection_prefer_cancel(self):
        con = await self.connect(
            dsn='postgresql://foo/postgres?sslmode=prefer',
            host='localhost',
            user='ssl_user')
        try:
            self.assertFalse(con._protocol.is_ssl)
            with self.assertRaises(asyncio.TimeoutError):
                await con.execute('SELECT pg_sleep(5)', timeout=0.5)
            val = await con.fetchval('SELECT 123')
            self.assertEqual(val, 123)
        finally:
            await con.close()

    async def test_nossl_connection_pool(self):
        pool = await self.create_pool(
            host='localhost', user='ssl_user', database='postgres',
            min_size=5, max_size=10, ssl='prefer')

        async def worker():
            async with pool.acquire() as con:
                self.assertFalse(con._protocol.is_ssl)
                self.assertEqual(await con.fetchval('SELECT 42'), 42)

                with self.assertRaises(asyncio.TimeoutError):
                    await con.execute('SELECT pg_sleep(5)', timeout=0.5)

                self.assertEqual(await con.fetchval('SELECT 43'), 43)

        tasks = [worker() for _ in range(100)]
        await asyncio.gather(*tasks)
        await pool.close()
class TestNoSSLConnection(BaseTestSSLConnection):
    def _add_hba_entry(self):
        pass

    async def test_nossl_connection_sslmode(self):
        pass

        async def verify_works(sslmode, *, host='localhost'):
            pass

        async def verify_fails(sslmode, *, host='localhost'):
            pass

    async def test_nossl_connection_prefer_cancel(self):
        pass

    async def test_nossl_connection_pool(self):
        pass

        async def worker():
            pass
8
0
18
1
16
0
1
0.01
1
2
0
0
4
0
4
9
90
10
79
16
71
1
53
15
45
2
2
2
9