index: int64 (0 – 731k)
package: string (lengths 2 – 98)
name: string (lengths 1 – 76)
docstring: string (lengths 0 – 281k)
code: string (lengths 4 – 1.07M)
signature: string (lengths 2 – 42.8k)
714,176
apispec.ext.marshmallow
parameter_helper
Parameter component helper that allows using a marshmallow :class:`Schema <marshmallow.Schema>` in parameter definition. :param dict parameter: parameter fields. May contain a marshmallow Schema class or instance.
def parameter_helper(self, parameter, **kwargs):
    """Parameter component helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` in parameter definition.

    :param dict parameter: parameter fields. May contain a marshmallow
        Schema class or instance.
    """
    assert self.resolver is not None, "init_spec has not yet been called"
    self.resolver.resolve_schema(parameter)
    return parameter
(self, parameter, **kwargs)
714,178
apispec.ext.marshmallow
response_helper
Response component helper that allows using a marshmallow :class:`Schema <marshmallow.Schema>` in response definition. :param dict response: response fields. May contain a marshmallow Schema class or instance.
def response_helper(self, response, **kwargs):
    """Response component helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` in response definition.

    :param dict response: response fields. May contain a marshmallow
        Schema class or instance.
    """
    assert self.resolver is not None, "init_spec has not yet been called"
    self.resolver.resolve_response(response)
    return response
(self, response, **kwargs)
714,179
apispec.ext.marshmallow
schema_helper
Definition helper that allows using a marshmallow :class:`Schema <marshmallow.Schema>` to provide OpenAPI metadata. :param type|Schema schema: A marshmallow Schema class or instance.
def schema_helper(self, name, _, schema=None, **kwargs):
    """Definition helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` to provide OpenAPI metadata.

    :param type|Schema schema: A marshmallow Schema class or instance.
    """
    if schema is None:
        return None
    schema_instance = resolve_schema_instance(schema)
    schema_key = make_schema_key(schema_instance)
    self.warn_if_schema_already_in_spec(schema_key)
    assert self.converter is not None, "init_spec has not yet been called"
    self.converter.refs[schema_key] = name
    json_schema = self.converter.schema2jsonschema(schema_instance)
    return json_schema
(self, name, _, schema=None, **kwargs)
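A minimal usage sketch for the helpers above, assuming the standard apispec setup with the MarshmallowPlugin registered; the PetSchema name is illustrative:

from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from marshmallow import Schema, fields

class PetSchema(Schema):  # illustrative schema
    name = fields.Str()

spec = APISpec(
    title="Pets",
    version="1.0.0",
    openapi_version="3.0.2",
    plugins=[MarshmallowPlugin()],
)
# Internally this routes through schema_helper, which converts the
# marshmallow schema to JSON Schema and records it under the given name.
spec.components.schema("Pet", schema=PetSchema)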
714,180
apispec.ext.marshmallow
warn_if_schema_already_in_spec
Method to warn the user if the schema has already been added to the spec.
def warn_if_schema_already_in_spec(self, schema_key: tuple) -> None:
    """Method to warn the user if the schema has already been added to the spec."""
    assert self.converter  # needed for mypy
    if schema_key in self.converter.refs:
        warnings.warn(
            f"{schema_key[0]} has already been added to the spec. Adding it twice may "
            "cause references to not resolve properly.",
            UserWarning,
            stacklevel=2,
        )
(self, schema_key: tuple) -> NoneType
714,183
darkdetect._linux_detect
isDark
null
def isDark(): return theme() == 'Dark'
()
714,184
darkdetect._linux_detect
isLight
null
def isLight(): return theme() == 'Light'
()
714,185
darkdetect._linux_detect
listener
null
def listener(callback):
    with subprocess.Popen(
        ('gsettings', 'monitor', 'org.gnome.desktop.interface', 'gtk-theme'),
        stdout=subprocess.PIPE,
        universal_newlines=True,
    ) as p:
        for line in p.stdout:
            callback('Dark' if '-dark' in line.strip().removeprefix("gtk-theme: '").removesuffix("'").lower() else 'Light')
(callback)
714,186
darkdetect
macos_supported_version
null
def macos_supported_version():
    sysver = platform.mac_ver()[0]  # typically 10.14.2 or 12.3
    major = int(sysver.split('.')[0])
    if major < 10:
        return False
    elif major >= 11:
        return True
    else:
        minor = int(sysver.split('.')[1])
        if minor < 14:
            return False
        else:
            return True
()
714,188
darkdetect._linux_detect
theme
null
def theme():
    try:
        # Using the freedesktop specifications for checking dark mode
        out = subprocess.run(
            ['gsettings', 'get', 'org.gnome.desktop.interface', 'color-scheme'],
            capture_output=True)
        stdout = out.stdout.decode()
        # If not found then trying older gtk-theme method
        if len(stdout) < 1:
            out = subprocess.run(
                ['gsettings', 'get', 'org.gnome.desktop.interface', 'gtk-theme'],
                capture_output=True)
            stdout = out.stdout.decode()
    except Exception:
        return 'Light'
    # we have a string, now remove start and end quote
    theme = stdout.lower().strip()[1:-1]
    if '-dark' in theme.lower():
        return 'Dark'
    else:
        return 'Light'
()
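The darkdetect entry points above compose into a very small consumer API; a usage sketch (output depends on the desktop theme):

import darkdetect

print(darkdetect.theme())    # 'Dark' or 'Light'
print(darkdetect.isDark())   # True if the desktop theme is dark

# The listener blocks, invoking the callback on every theme change;
# run it in a thread if the main thread must stay responsive.
import threading
t = threading.Thread(target=darkdetect.listener, args=(print,), daemon=True)
t.start()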
714,189
flake8_print
PrintChecker
null
class PrintChecker(object):
    options = None
    name = "flake8-print"
    version = __version__

    def __init__(self, tree, filename):
        self.tree = tree
        self.filename = filename
        self.lines = None

    def load_file(self):
        if self.filename in ("stdin", "-", None):
            self.filename = "stdin"
            self.lines = stdin_utils.stdin_get_value().splitlines(True)
        else:
            self.lines = pycodestyle.readlines(self.filename)
        if not self.tree:
            self.tree = ast.parse("".join(self.lines))

    def run(self):
        if not self.tree or not self.lines:
            self.load_file()
        parser = PrintFinder()
        parser.visit(self.tree)
        error_dicts = (parser.prints_used, parser.prints_redefined)
        errors_seen = set()
        for index, error_dict in enumerate(error_dicts):
            for error, message in error_dict.items():
                if error in errors_seen:
                    continue
                errors_seen.add(error)
                yield (error[0], error[1], message, PrintChecker)
(tree, filename)
714,192
flake8_print
run
null
def run(self):
    if not self.tree or not self.lines:
        self.load_file()
    parser = PrintFinder()
    parser.visit(self.tree)
    error_dicts = (parser.prints_used, parser.prints_redefined)
    errors_seen = set()
    for index, error_dict in enumerate(error_dicts):
        for error, message in error_dict.items():
            if error in errors_seen:
                continue
            errors_seen.add(error)
            yield (error[0], error[1], message, PrintChecker)
(self)
714,193
flake8_print
PrintFinder
null
class PrintFinder(ast.NodeVisitor):
    def __init__(self, *args, **kwargs):
        super(PrintFinder, self).__init__(*args, **kwargs)
        self.prints_used = {}
        self.prints_redefined = {}

    def visit_Call(self, node):
        is_print_function = getattr(node.func, "id", None) in PRINT_FUNCTION_NAMES
        is_print_function_attribute = (
            getattr(getattr(node.func, "value", None), "id", None) in PRINT_FUNCTION_NAMES
            and getattr(node.func, "attr", None) in PRINT_FUNCTION_NAMES
        )
        if is_print_function:
            self.prints_used[(node.lineno, node.col_offset)] = VIOLATIONS["found"][node.func.id]
        elif is_print_function_attribute:
            self.prints_used[(node.lineno, node.col_offset)] = VIOLATIONS["found"][node.func.attr]
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        if node.name in PRINT_FUNCTION_NAMES:
            self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][node.name]
        for arg in node.args.args:
            if arg.arg in PRINT_FUNCTION_NAMES:
                self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][arg.arg]
        for arg in node.args.kwonlyargs:
            if arg.arg in PRINT_FUNCTION_NAMES:
                self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][arg.arg]
        self.generic_visit(node)

    def visit_Name(self, node):
        if node.id == PRINT_FUNCTION_NAME:
            self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][node.id]
        self.generic_visit(node)
(*args, **kwargs)
714,194
flake8_print
__init__
null
def __init__(self, *args, **kwargs):
    super(PrintFinder, self).__init__(*args, **kwargs)
    self.prints_used = {}
    self.prints_redefined = {}
(self, *args, **kwargs)
714,197
flake8_print
visit_Call
null
def visit_Call(self, node):
    is_print_function = getattr(node.func, "id", None) in PRINT_FUNCTION_NAMES
    is_print_function_attribute = (
        getattr(getattr(node.func, "value", None), "id", None) in PRINT_FUNCTION_NAMES
        and getattr(node.func, "attr", None) in PRINT_FUNCTION_NAMES
    )
    if is_print_function:
        self.prints_used[(node.lineno, node.col_offset)] = VIOLATIONS["found"][node.func.id]
    elif is_print_function_attribute:
        self.prints_used[(node.lineno, node.col_offset)] = VIOLATIONS["found"][node.func.attr]
    self.generic_visit(node)
(self, node)
714,199
flake8_print
visit_FunctionDef
null
def visit_FunctionDef(self, node):
    if node.name in PRINT_FUNCTION_NAMES:
        self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][node.name]
    for arg in node.args.args:
        if arg.arg in PRINT_FUNCTION_NAMES:
            self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][arg.arg]
    for arg in node.args.kwonlyargs:
        if arg.arg in PRINT_FUNCTION_NAMES:
            self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][arg.arg]
    self.generic_visit(node)
(self, node)
714,200
flake8_print
visit_Name
null
def visit_Name(self, node):
    if node.id == PRINT_FUNCTION_NAME:
        self.prints_redefined[(node.lineno, node.col_offset)] = VIOLATIONS["declared"][node.id]
    self.generic_visit(node)
(self, node)
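A sketch of driving the visitor directly, outside the flake8 plugin machinery; it assumes PrintFinder is importable from flake8_print and that VIOLATIONS maps to T-prefixed message strings:

import ast
from flake8_print import PrintFinder

tree = ast.parse("print('hello')\n")
finder = PrintFinder()
finder.visit(tree)
# prints_used maps (line, column) -> violation message for each print call
for (line, col), message in finder.prints_used.items():
    print(line, col, message)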
714,204
taskorbit.dispatching.handler
BaseHandler
null
class BaseHandler(ABC):
    def __init__(self) -> None:
        self.name = "unknown"
        self.__task = None
        self._timer_manager = TimerManager()
        self.uuid: Optional[str] = None
        self.execution_timeout: Optional[int] = None
        self.on_execution_timeout: Optional[Callable[[...], Awaitable[None]]] = None
        self.close_timeout: Optional[int] = None
        self.on_close: Optional[Callable[[...], Awaitable[None]]] = None
        if (
            not isinstance(self.on_execution_timeout, Callable | NoneType)
            or not isinstance(self.on_close, Callable | NoneType)
            or inspect.isclass(self.on_execution_timeout)
            or inspect.isclass(self.on_close)
        ):
            raise TypeError("The callback must be either a function or NoneType")

    def __str__(self) -> str:
        return f"<Handler:{self.name}>"

    def __repr__(self) -> str:
        return self.__str__()

    async def _execution(self, **kwargs) -> None:
        if self.on_execution_timeout is not None:
            await self.on_execution_timeout(**kwargs)
        else:
            logger.debug(f"Please wait, the task-{self.uuid} is still in progress...")

    async def _close(self, **kwargs) -> None:
        if self.on_close is not None:
            await self.on_close(**kwargs)
        logger.debug("The timeout has expired and the task is being closed...")
        if self.__task is not None:
            self.__task.cancel()
        else:
            logger.warning("Closing via timeout was incorrect. The task does not exist!")
            self.cancel(...)

    def cancel(self, _) -> None:
        self._timer_manager.cancel_timers()
        logger.debug("Cancelled")

    @abstractmethod
    async def handle(self, *args, **kwargs) -> None: ...

    async def __call__(self, fields_execution_callback: dict[str, Any], fields_close_callback: dict[str, Any], **kwargs) -> None:
        # try:
        self.__task = asyncio.create_task(self.handle(**kwargs))
        self.__task.add_done_callback(self.cancel)
        await self._timer_manager.start_timer(self.execution_timeout, self._execution, **fields_execution_callback)
        await self._timer_manager.start_timer(self.close_timeout, self._close, **fields_close_callback)
        await self.__task
        # except Exception as e:
        #     logger.debug(f"An error occurred: {e.args[0]}")
() -> None
714,205
taskorbit.dispatching.handler
__call__
null
async def __call__(self, fields_execution_callback: dict[str, Any], fields_close_callback: dict[str, Any], **kwargs) -> None:
    # try:
    self.__task = asyncio.create_task(self.handle(**kwargs))
    self.__task.add_done_callback(self.cancel)
    await self._timer_manager.start_timer(self.execution_timeout, self._execution, **fields_execution_callback)
    await self._timer_manager.start_timer(self.close_timeout, self._close, **fields_close_callback)
    await self.__task
    # except Exception as e:
    #     logger.debug(f"An error occurred: {e.args[0]}")
(self, fields_execution_callback: dict[str, typing.Any], fields_close_callback: dict[str, typing.Any], **kwargs) -> NoneType
714,206
taskorbit.dispatching.handler
__init__
null
def __init__(self) -> None:
    self.name = "unknown"
    self.__task = None
    self._timer_manager = TimerManager()
    self.uuid: Optional[str] = None
    self.execution_timeout: Optional[int] = None
    self.on_execution_timeout: Optional[Callable[[...], Awaitable[None]]] = None
    self.close_timeout: Optional[int] = None
    self.on_close: Optional[Callable[[...], Awaitable[None]]] = None
    if (
        not isinstance(self.on_execution_timeout, Callable | NoneType)
        or not isinstance(self.on_close, Callable | NoneType)
        or inspect.isclass(self.on_execution_timeout)
        or inspect.isclass(self.on_close)
    ):
        raise TypeError("The callback must be either a function or NoneType")
(self) -> NoneType
714,208
taskorbit.dispatching.handler
__str__
null
def __str__(self) -> str: return f"<Handler:{self.name}>"
(self) -> str
714,211
taskorbit.dispatching.handler
cancel
null
def cancel(self, _) -> None:
    self._timer_manager.cancel_timers()
    logger.debug("Cancelled")
(self, _) -> NoneType
714,213
taskorbit.dispatching.dispatcher
Dispatcher
null
class Dispatcher(Router):
    def __init__(self, max_queue_size: int) -> None:
        super().__init__(name='DISPATCHER')
        self.middleware = MiddlewareManager()
        self.inner_middleware = MiddlewareManager()
        self.queue: Queue[str, asyncio.Task] = Queue(max_queue_size)
        self.context_data: dict = {}

    def __setitem__(self, key, value):
        self.context_data[key] = value

    async def _service_processing(self, metadata: ServiceMessage) -> None:
        logger.debug(f"Getting service messages: {metadata.command}")
        if metadata.command == Commands.GET_STATUS:
            status: TaskStatus = self.queue.get_status_task(metadata.uuid)
            logger.debug(f"Task-{metadata.uuid} STATUS: {status}")
        elif metadata.command == Commands.CLOSING:
            if metadata.uuid in self.queue:
                self.queue[metadata.uuid].cancel()
                logger.debug(f"The task-{metadata.uuid} was forcibly completed")
            else:
                logger.warning(f"Failed to close the task-{metadata.uuid}, there is no such task in the queue")

    def __cb_close_task(self, future) -> None:
        name = future.get_name()
        self.queue.pop(name)
        logger.debug(f"The task-{name} has been removed from the queue")

    async def listen(self, metadata: Metadata) -> None:
        if isinstance(metadata, ServiceMessage):
            _ = asyncio.create_task(self._service_processing(metadata))
        elif isinstance(metadata, Message):
            # try:
            task = asyncio.create_task(self._metadata_processing(metadata), name=metadata.uuid)
            task.add_done_callback(self.__cb_close_task)
            self.queue[metadata.uuid] = task
            # except Exception as e:
            #     logger.error(e.args[0])

    async def _metadata_processing(self, metadata: Metadata) -> None:
        data = self.context_data.copy()
        # try:
        call_processing: partial = await self.middleware.middleware_processing(handler=self._message_processing, metadata=metadata)
        await call_processing(metadata=metadata, data=data)
        # except Exception as e:
        #     logger.error(f"{e.args[0]}")

    async def _message_processing(self, metadata: Message, data: dict[str, Any]) -> Any:
        handler: Type[HandlerType] = await find_handler(
            handlers=self.handlers, router=self, metadata=metadata, data=data
        )

        async def _handler_processing(metadata: Message, data: dict[str, Any]) -> Any:
            nonlocal handler
            if isinstance(handler, abc.ABCMeta):
                handler = handler(**get_list_parameters(handler.__init__, metadata=metadata, data=data))
            fields_cls: dict = get_list_parameters(handler.__call__, metadata=metadata, data=data)
            fields_handle: dict = get_list_parameters(handler.handle, metadata=metadata, data=data, is_handler=True)
            fields_execution_callback: dict = get_list_parameters(handler.on_execution_timeout, metadata=metadata, data=data, is_handler=True)
            fields_close_callback: dict = get_list_parameters(handler.on_close, metadata=metadata, data=data, is_handler=True)
            return await handler(**{
                **fields_cls,
                **fields_handle,
                'fields_execution_callback': fields_execution_callback,
                'fields_close_callback': fields_close_callback
            })

        call_processing: partial | Callable = await self.inner_middleware.middleware_processing(handler=_handler_processing, metadata=metadata)
        # try:
        return await call_processing(metadata=metadata, data=data)
        # except Exception as e:
        #     logger.error(f"{e.args[0]}")
(max_queue_size: int) -> None
714,214
taskorbit.dispatching.dispatcher
__cb_close_task
null
def __cb_close_task(self, future) -> None:
    name = future.get_name()
    self.queue.pop(name)
    logger.debug(f"The task-{name} has been removed from the queue")
(self, future) -> NoneType
714,215
taskorbit.dispatching.dispatcher
__init__
null
def __init__(self, max_queue_size: int) -> None:
    super().__init__(name='DISPATCHER')
    self.middleware = MiddlewareManager()
    self.inner_middleware = MiddlewareManager()
    self.queue: Queue[str, asyncio.Task] = Queue(max_queue_size)
    self.context_data: dict = {}
(self, max_queue_size: int) -> NoneType
714,217
taskorbit.dispatching.dispatcher
__setitem__
null
def __setitem__(self, key, value): self.context_data[key] = value
(self, key, value)
714,218
taskorbit.dispatching.router
__str__
null
def __str__(self) -> str: return f"<Router:{self.name}>"
(self) -> str
714,222
taskorbit.dispatching.router
include_class_handler
null
def include_class_handler(self, *filters: FilterType) -> Type[HandlerType]:
    def wrapper(cls: HandlerType):
        self.handlers[cls] = validate_filters(filters)
        return cls
    return wrapper
(self, *filters: Union[magic_filter.magic.MagicFilter, taskorbit.filter.BaseFilter, bool, tuple]) -> Type[Union[taskorbit.dispatching.handler.BaseHandler, taskorbit.dispatching.handler.Handler, Callable]]
714,223
taskorbit.dispatching.router
include_handler
null
def include_handler(
    self,
    *filters: FilterType,
    execution_timeout: Optional[int] = None,
    on_execution_timeout: Optional[Callable] = None,
    close_timeout: Optional[int] = None,
    on_close: Optional[Callable] = None,
) -> Callable:
    def wrapper(handler: Callable):
        cls = Handler()
        cls.name = handler.__name__
        cls.execution_timeout = execution_timeout
        cls.on_execution_timeout = on_execution_timeout
        cls.close_timeout = close_timeout
        cls.on_close = on_close
        cls.handle = handler
        self.handlers[cls] = validate_filters(filters)
        return handler
    return wrapper
(self, *filters: Union[magic_filter.magic.MagicFilter, taskorbit.filter.BaseFilter, bool, tuple], execution_timeout: Optional[int] = None, on_execution_timeout: Optional[Callable] = None, close_timeout: Optional[int] = None, on_close: Optional[Callable] = None) -> Callable
714,224
taskorbit.dispatching.router
include_router
null
def include_router(self, router: "Router", *filters: FilterType) -> None:
    if not isinstance(router, Router):
        raise TypeError(f"The router must be an instance of Router, but received {type(router).__name__}")
    self.child_routers[router] = validate_filters(filters)
(self, router: taskorbit.dispatching.router.Router, *filters: Union[magic_filter.magic.MagicFilter, taskorbit.filter.BaseFilter, bool, tuple]) -> NoneType
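A hedged sketch of how these registration hooks fit together, inferred only from the signatures above; the filter value True and the queue size are placeholders:

from taskorbit.dispatching.dispatcher import Dispatcher
from taskorbit.dispatching.router import Router

router = Router(name="jobs")

@router.include_handler(True, execution_timeout=30)
async def process(metadata):
    ...  # task body

# Dispatcher subclasses Router, so it inherits include_router.
dispatcher = Dispatcher(max_queue_size=100)
dispatcher.include_router(router)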
714,226
taskorbit.models
Message
Message(uuid: str, type_event: str, data: Optional[dict] = None)
class Message(BaseType):
    uuid: str
    type_event: str
    data: Optional[dict] = None
(uuid: str, type_event: str, data: Optional[dict] = None) -> None
714,227
taskorbit.models
__eq__
null
from dataclasses import dataclass, fields
from enum import EnumMeta
from typing import Optional, Union

from taskorbit.enums import Commands


@dataclass
class BaseType:
    def __post_init__(self):
        for field in fields(self):
            attr_sel = getattr(self, field.name)
            if isinstance(field.type, EnumMeta):
                if field.type.validate_key(attr_sel):
                    setattr(self, field.name, field.type[attr_sel])
            elif not isinstance(attr_sel, field.type):
                raise TypeError(f"Invalid nested type: {field.name}: {type(attr_sel).__name__} != {field.type.__name__}")

    @classmethod
    def validate_fields(cls, data: set[str]) -> bool:
        if not isinstance(data, set):
            raise TypeError(f"The `data` must be a set, but received {type(data).__name__}")
        return data == {field.name for field in fields(cls) if field.default is not None or field.name in data}
(self, other)
714,229
taskorbit.models
__post_init__
null
def __post_init__(self):
    for field in fields(self):
        attr_sel = getattr(self, field.name)
        if isinstance(field.type, EnumMeta):
            if field.type.validate_key(attr_sel):
                setattr(self, field.name, field.type[attr_sel])
        elif not isinstance(attr_sel, field.type):
            raise TypeError(f"Invalid nested type: {field.name}: {type(attr_sel).__name__} != {field.type.__name__}")
(self)
714,231
taskorbit.middlewares.middleware
Middleware
null
class Middleware(ABC):
    @abstractmethod
    async def __call__(self, handler: Callable, metadata: Message, data: dict[str, Any]) -> Any: ...
()
714,232
taskorbit.middlewares.middleware
__call__
null
@abstractmethod
async def __call__(self, handler: Callable, metadata: Message, data: dict[str, Any]) -> Any: ...
(self, handler: Callable, metadata: taskorbit.models.Message, data: dict[str, typing.Any]) -> Any
714,233
taskorbit.dispatching.router
Router
null
class Router:
    def __init__(self, name: str = uuid.uuid4().hex) -> None:
        self.name = name
        self.child_routers: dict["Router", tuple[FilterType, ...]] = {}
        self.handlers: dict[Type[HandlerType], tuple[FilterType, ...]] = {}

    def __str__(self) -> str:
        return f"<Router:{self.name}>"

    def __repr__(self) -> str:
        return self.__str__()

    def include_router(self, router: "Router", *filters: FilterType) -> None:
        if not isinstance(router, Router):
            raise TypeError(f"The router must be an instance of Router, but received {type(router).__name__}")
        self.child_routers[router] = validate_filters(filters)

    def include_class_handler(self, *filters: FilterType) -> Type[HandlerType]:
        def wrapper(cls: HandlerType):
            self.handlers[cls] = validate_filters(filters)
            return cls
        return wrapper

    def include_handler(
        self,
        *filters: FilterType,
        execution_timeout: Optional[int] = None,
        on_execution_timeout: Optional[Callable] = None,
        close_timeout: Optional[int] = None,
        on_close: Optional[Callable] = None,
    ) -> Callable:
        def wrapper(handler: Callable):
            cls = Handler()
            cls.name = handler.__name__
            cls.execution_timeout = execution_timeout
            cls.on_execution_timeout = on_execution_timeout
            cls.close_timeout = close_timeout
            cls.on_close = on_close
            cls.handle = handler
            self.handlers[cls] = validate_filters(filters)
            return handler
        return wrapper
(name: str = 'c759e0fd67344c11a39960a79ab8959b') -> None
714,234
taskorbit.dispatching.router
__init__
null
def __init__(self, name: str = uuid.uuid4().hex) -> None:
    self.name = name
    self.child_routers: dict["Router", tuple[FilterType, ...]] = {}
    self.handlers: dict[Type[HandlerType], tuple[FilterType, ...]] = {}
(self, name: str = 'c759e0fd67344c11a39960a79ab8959b') -> NoneType
714,240
taskorbit.models
ServiceMessage
ServiceMessage(uuid: str, command: taskorbit.enums.Commands)
class ServiceMessage(BaseType):
    uuid: str
    command: Commands
(uuid: str, command: taskorbit.enums.Commands) -> None
714,319
pythonnet
_create_runtime_from_spec
null
def _create_runtime_from_spec(
    spec: str, params: Optional[Dict[str, Any]] = None
) -> clr_loader.Runtime:
    was_default = False
    if spec == "default":
        was_default = True
        if sys.platform == "win32":
            spec = "netfx"
        else:
            spec = "mono"

    params = params or _get_params_from_env(spec)

    try:
        if spec == "netfx":
            return clr_loader.get_netfx(**params)
        elif spec == "mono":
            return clr_loader.get_mono(**params)
        elif spec == "coreclr":
            return clr_loader.get_coreclr(**params)
        else:
            raise RuntimeError(f"Invalid runtime name: '{spec}'")
    except Exception as exc:
        if was_default:
            raise RuntimeError(
                f"""Failed to create a default .NET runtime, which would
                have been "{spec}" on this system. Either install a
                compatible runtime or configure it explicitly via
                `set_runtime` or the `PYTHONNET_*` environment variables
                (see set_runtime_from_env)."""
            ) from exc
        else:
            raise RuntimeError(
                f"""Failed to create a .NET runtime ({spec}) using the
                parameters {params}."""
            ) from exc
(spec: str, params: Optional[Dict[str, Any]] = None) -> clr_loader.types.Runtime
714,320
pythonnet
_get_params_from_env
null
def _get_params_from_env(prefix: str) -> Dict[str, str]:
    from os import environ

    full_prefix = f"PYTHONNET_{prefix.upper()}_"
    len_ = len(full_prefix)
    env_vars = {
        (k[len_:].lower()): v
        for k, v in environ.items()
        if k.upper().startswith(full_prefix)
    }
    return env_vars
(prefix: str) -> Dict[str, str]
714,322
pythonnet
get_runtime_info
Retrieve information on the configured runtime
def get_runtime_info() -> Optional[clr_loader.RuntimeInfo]:
    """Retrieve information on the configured runtime"""
    if _RUNTIME is None:
        return None
    else:
        return _RUNTIME.info()
() -> Optional[clr_loader.types.RuntimeInfo]
714,323
pythonnet
load
Load Python.NET in the specified runtime The same parameters as for `set_runtime` can be used. By default, `set_default_runtime` is called if no environment has been set yet and no parameters are passed. After a successful call, further invocations will return immediately.
def load(runtime: Union[clr_loader.Runtime, str, None] = None, **params: str) -> None:
    """Load Python.NET in the specified runtime

    The same parameters as for `set_runtime` can be used. By default,
    `set_default_runtime` is called if no environment has been set yet and no
    parameters are passed. After a successful call, further invocations will
    return immediately."""
    global _LOADED, _LOADER_ASSEMBLY

    if _LOADED:
        return

    if _RUNTIME is None:
        if runtime is None:
            set_runtime_from_env()
        else:
            set_runtime(runtime, **params)

    if _RUNTIME is None:
        raise RuntimeError("No valid runtime selected")

    dll_path = Path(__file__).parent / "runtime" / "Python.Runtime.dll"
    _LOADER_ASSEMBLY = assembly = _RUNTIME.get_assembly(str(dll_path))
    func = assembly.get_function("Python.Runtime.Loader.Initialize")

    if func(b"") != 0:
        raise RuntimeError("Failed to initialize Python.Runtime.dll")

    _LOADED = True

    import atexit
    atexit.register(unload)
(runtime: Union[clr_loader.types.Runtime, str, NoneType] = None, **params: str) -> NoneType
714,324
pythonnet
set_runtime
Set up a clr_loader runtime without loading it :param runtime: Either an already initialised `clr_loader` runtime, or one of netfx, coreclr, mono, or default. If a string parameter is given, the runtime will be created.
def set_runtime(runtime: Union[clr_loader.Runtime, str], **params: str) -> None:
    """Set up a clr_loader runtime without loading it

    :param runtime: Either an already initialised `clr_loader` runtime, or one
        of netfx, coreclr, mono, or default. If a string parameter is given, the
        runtime will be created.
    """
    global _RUNTIME
    if _LOADED:
        raise RuntimeError(f"The runtime {_RUNTIME} has already been loaded")

    if isinstance(runtime, str):
        runtime = _create_runtime_from_spec(runtime, params)

    _RUNTIME = runtime
(runtime: Union[clr_loader.types.Runtime, str], **params: str) -> NoneType
714,325
pythonnet
set_runtime_from_env
Set up the runtime using the environment This will use the environment variable PYTHONNET_RUNTIME to decide the runtime to use, which may be one of netfx, coreclr or mono. The parameters of the respective clr_loader.get_<runtime> functions can also be given as environment variables, named `PYTHONNET_<RUNTIME>_<PARAM_NAME>`. In particular, to use `PYTHONNET_RUNTIME=coreclr`, the variable `PYTHONNET_CORECLR_RUNTIME_CONFIG` has to be set to a valid `.runtimeconfig.json`. If no environment variable is specified, a globally installed Mono is used for all environments but Windows, on Windows the legacy .NET Framework is used.
def set_runtime_from_env() -> None:
    """Set up the runtime using the environment

    This will use the environment variable PYTHONNET_RUNTIME to decide the
    runtime to use, which may be one of netfx, coreclr or mono. The parameters
    of the respective clr_loader.get_<runtime> functions can also be given as
    environment variables, named `PYTHONNET_<RUNTIME>_<PARAM_NAME>`. In
    particular, to use `PYTHONNET_RUNTIME=coreclr`, the variable
    `PYTHONNET_CORECLR_RUNTIME_CONFIG` has to be set to a valid
    `.runtimeconfig.json`.

    If no environment variable is specified, a globally installed Mono is used
    for all environments but Windows, on Windows the legacy .NET Framework is
    used.
    """
    from os import environ

    spec = environ.get("PYTHONNET_RUNTIME", "default")
    runtime = _create_runtime_from_spec(spec)
    set_runtime(runtime)
() -> NoneType
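A usage sketch of the environment-driven path described in the docstring above; the runtimeconfig path is a hypothetical placeholder:

import os

# Select CoreCLR and point it at a runtime config (hypothetical path).
os.environ["PYTHONNET_RUNTIME"] = "coreclr"
os.environ["PYTHONNET_CORECLR_RUNTIME_CONFIG"] = "app.runtimeconfig.json"

import pythonnet
pythonnet.load()                      # falls back to set_runtime_from_env()
print(pythonnet.get_runtime_info())   # inspect what was actually loaded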
714,327
pythonnet
unload
Explicitly unload a loaded runtime and shut down Python.NET
def unload() -> None:
    """Explicitly unload a loaded runtime and shut down Python.NET"""
    global _RUNTIME, _LOADER_ASSEMBLY
    if _LOADER_ASSEMBLY is not None:
        func = _LOADER_ASSEMBLY.get_function("Python.Runtime.Loader.Shutdown")
        if func(b"full_shutdown") != 0:
            raise RuntimeError("Failed to call Python.NET shutdown")
        _LOADER_ASSEMBLY = None
    if _RUNTIME is not None:
        _RUNTIME.shutdown()
        _RUNTIME = None
() -> NoneType
714,328
qt5_tools
_add_to_env_var_path_list
null
def _add_to_env_var_path_list(environment, name, before, after):
    return {
        name: os.pathsep.join((
            *before,
            environment.get(name, ''),
            *after
        ))
    }
(environment, name, before, after)
714,330
qt5_tools
application_names
null
def application_names(): return qt_applications._application_names()
()
714,331
qt5_tools
application_path
null
def application_path(name): return qt_applications._application_path(name)
(name)
714,332
qt5_tools
bin_path
null
def bin_path(): return qt_applications._bin
()
714,333
qt5_tools
create_command_elements
null
def create_command_elements(name, sys_platform=sys.platform):
    path = application_path(name)
    if sys_platform == 'darwin' and path.suffix == '.app':
        inner = path.joinpath('Contents', 'MacOS', path.stem)
        return [fspath(inner)]
    return [fspath(path)]
(name, sys_platform='linux')
714,334
qt5_tools
create_environment
null
def create_environment(reference=None):
    if reference is None:
        reference = os.environ
    environment = dict(reference)
    if sys.platform in ['linux', 'darwin']:
        environment.update(_add_to_env_var_path_list(
            environment=environment,
            name='LD_LIBRARY_PATH',
            before=[''],
            after=[sysconfig.get_config_var('LIBDIR')],
        ))
    if sys.platform == 'win32':
        environment.update(_add_to_env_var_path_list(
            environment=environment,
            name='PATH',
            before=[''],
            after=[sysconfig.get_path('scripts')],
        ))
    return environment
(reference=None)
714,340
biolexica.api
Annotation
Data about an annotation.
class Annotation(BaseModel):
    """Data about an annotation."""

    text: str
    start: int
    end: int
    match: Match

    @property
    def reference(self) -> Reference:
        """Get the match's reference."""
        return self.match.reference

    @property
    def name(self) -> str:
        """Get the match's entry name."""
        return self.match.name

    @property
    def curie(self) -> str:
        """Get the match's CURIE."""
        return self.match.curie

    @property
    def score(self) -> float:
        """Get the match's score."""
        return self.match.score

    @property
    def substr(self) -> str:
        """Get the substring that was matched."""
        return self.text[self.start : self.end]
(*, text: str, start: int, end: int, match: biolexica.api.Match) -> None
714,369
biolexica.api
Configuration
A configuration for construction of a lexicon.
class Configuration(BaseModel):
    """A configuration for construction of a lexicon."""

    inputs: List[Input]
    excludes: Optional[List[str]] = Field(
        default=None, description="A list of CURIEs to exclude after processing is complete"
    )
(*, inputs: List[biolexica.api.Input], excludes: Optional[List[str]] = None) -> None
714,398
biolexica.api
Grounder
Wrap a Gilda grounder with additional functionality.
class Grounder(gilda.Grounder):
    """Wrap a Gilda grounder with additional functionality."""

    def get_matches(
        self,
        s: str,
        context: Optional[str] = None,
        organisms: Optional[List[str]] = None,
        namespaces: Optional[List[str]] = None,
    ) -> List[Match]:
        """Get matches in Biolexica's format."""
        return [
            Match.from_gilda(scored_match)
            for scored_match in super().ground(
                s, context=context, organisms=organisms, namespaces=namespaces
            )
        ]

    def get_best_match(
        self,
        s: str,
        context: Optional[str] = None,
        organisms: Optional[List[str]] = None,
        namespaces: Optional[List[str]] = None,
    ) -> Optional[Match]:
        """Get the best match in Biolexica's format."""
        scored_matches = super().ground(
            s, context=context, organisms=organisms, namespaces=namespaces
        )
        if not scored_matches:
            return None
        return Match.from_gilda(scored_matches[0])

    def annotate(self, text: str, **kwargs: Any) -> List[Annotation]:
        """Annotate the text."""
        import gilda.ner

        return [
            Annotation(text=text, match=Match.from_gilda(match), start=start, end=end)
            for text, match, start, end in gilda.ner.annotate(text, grounder=self, **kwargs)
        ]
(terms: Union[str, pathlib.Path, Iterable[gilda.term.Term], Mapping[str, List[gilda.term.Term]], NoneType] = None, *, namespace_priority: List[str] = None)
714,399
gilda.grounder
__init__
null
def __init__(
    self,
    terms: Optional[GrounderInput] = None,
    *,
    namespace_priority: Optional[List[str]] = None,
):
    if terms is None:
        terms = get_grounding_terms()

    if isinstance(terms, str) and terms.startswith("http"):
        with tempfile.TemporaryDirectory() as directory:
            path = Path(directory).joinpath("terms.tsv.gz")
            urlretrieve(terms, path)  # noqa:S310
            self.entries = load_terms_file(path)
    elif isinstance(terms, (str, Path)):
        extension = os.path.splitext(terms)[1]
        if extension == '.db':
            from .resources.sqlite_adapter import SqliteEntries
            self.entries = SqliteEntries(terms)
        else:
            self.entries = load_terms_file(terms)
    elif isinstance(terms, dict):
        self.entries = terms
    elif isinstance(terms, collections.abc.Iterable):
        self.entries = defaultdict(list)
        for term in terms:
            self.entries[term.norm_text].append(term)
        self.entries = dict(self.entries)
    else:
        raise TypeError('terms is neither a path nor a list of terms, '
                        'nor a normalized entry name to term dictionary')

    self.prefix_index = {}
    self._build_prefix_index()
    self.adeft_disambiguators = find_adeft_models()
    self.gilda_disambiguators = None
    self.namespace_priority = (
        DEFAULT_NAMESPACE_PRIORITY
        if namespace_priority is None
        else namespace_priority
    )
(self, terms: Union[str, pathlib.Path, Iterable[gilda.term.Term], Mapping[str, List[gilda.term.Term]], NoneType] = None, *, namespace_priority: Optional[List[str]] = None)
714,400
gilda.grounder
_build_prefix_index
null
def _build_prefix_index(self):
    prefix_index = defaultdict(set)
    for norm_term in self.entries:
        if not norm_term:
            continue
        parts = norm_term.split()
        if not parts:
            continue
        prefix_index[parts[0]].add(len(parts))
    self.prefix_index = dict(prefix_index)
(self)
714,401
gilda.grounder
_generate_lookups
null
def _generate_lookups(self, raw_str: str) -> Set[str]:
    # TODO: we should propagate flags about depluralization and possible
    #  other modifications made here and take them into account when
    #  scoring
    # We first add the normalized string itself
    norm = normalize(raw_str)
    lookups = {norm}
    # Then we add a version with dashes replaced by spaces
    norm_spacedash = normalize(replace_dashes(raw_str, ' '))
    lookups.add(norm_spacedash)
    # We then try to replace spelled out greek letters with
    # their unicode equivalents or their latin equivalents
    greek_replaced = normalize(replace_greek_uni(raw_str))
    lookups.add(greek_replaced)
    greek_replaced = normalize(replace_greek_latin(raw_str))
    lookups.add(greek_replaced)
    greek_replaced = normalize(replace_greek_spelled_out(raw_str))
    lookups.add(greek_replaced)
    # We try exchanging roman and arabic numerals
    roman_arabic = normalize(replace_roman_arabic(raw_str))
    lookups.add(roman_arabic)
    # Finally, we attempt to depluralize the word
    for singular, rule in depluralize(raw_str):
        lookups.add(normalize(singular))

    logger.debug('Looking up the following strings: %s' % ', '.join(lookups))
    return lookups
(self, raw_str: str) -> Set[str]
714,402
gilda.grounder
_iter_terms
null
def _iter_terms(self):
    for terms in self.entries.values():
        yield from terms
(self)
714,403
gilda.grounder
_merge_equivalent_matches
null
@staticmethod
def _merge_equivalent_matches(scored_matches):
    unique_entries = []
    # Characterize an entry by its grounding
    term_dbid = lambda x: (x.term.db, x.term.id)
    # Sort and group scores by grounding
    scored_matches.sort(key=term_dbid)
    entry_groups = itertools.groupby(scored_matches, key=term_dbid)
    # Now look at each group and find the highest scoring match
    for _, entry_group in entry_groups:
        entries = sorted(list(entry_group), key=lambda x: x.score, reverse=True)
        entries[0].subsumed_terms = [e.term for e in entries[1:]]
        unique_entries.append(entries[0])
    # Return the list of unique entries
    return unique_entries
(scored_matches)
714,404
gilda.grounder
_score_namespace
Apply a priority to the term based on its namespace.

.. note::

    This is currently not included as an explicit score term. It is just
    used to rank identically scored entries.
def _score_namespace(self, term) -> int:
    """Apply a priority to the term based on its namespace.

    .. note::

        This is currently not included as an explicit score term. It is
        just used to rank identically scored entries.
    """
    try:
        return len(self.namespace_priority) - self.namespace_priority.index(term.db)
    except ValueError:
        return 0
(self, term) -> int
714,405
biolexica.api
annotate
Annotate the text.
def annotate(self, text: str, **kwargs: Any) -> List[Annotation]:
    """Annotate the text."""
    import gilda.ner

    return [
        Annotation(text=text, match=Match.from_gilda(match), start=start, end=end)
        for text, match, start, end in gilda.ner.annotate(text, grounder=self, **kwargs)
    ]
(self, text: str, **kwargs: Any) -> List[biolexica.api.Annotation]
714,406
gilda.grounder
disambiguate
null
def disambiguate(self, raw_str, scored_matches, context):
    # This is only called if context was passed in so we do lazy
    # loading here
    if self.gilda_disambiguators is None:
        self.gilda_disambiguators = load_gilda_models()
    # If we don't have a disambiguator for this string, we return with
    # the original scores intact. Otherwise, we attempt to disambiguate.
    if raw_str in self.adeft_disambiguators:
        logger.info('Running Adeft disambiguation for %s' % raw_str)
        try:
            scored_matches = \
                self.disambiguate_adeft(raw_str, scored_matches, context)
        except Exception as e:
            logger.exception(e)
    elif raw_str in self.gilda_disambiguators:
        logger.info('Running Gilda disambiguation for %s' % raw_str)
        try:
            scored_matches = \
                self.disambiguate_gilda(raw_str, scored_matches, context)
        except Exception as e:
            logger.exception(e)
    return scored_matches
(self, raw_str, scored_matches, context)
714,407
gilda.grounder
disambiguate_adeft
null
def disambiguate_adeft(self, raw_str, scored_matches, context):
    # We find the disambiguator for the given string and pass in
    # context
    if self.adeft_disambiguators[raw_str] is None:
        self.adeft_disambiguators[raw_str] = load_disambiguator(raw_str)
    res = self.adeft_disambiguators[raw_str].disambiguate([context])
    # The actual grounding dict is at this index in the result
    grounding_dict = res[0][2]
    logger.debug('Result from Adeft: %s' % str(grounding_dict))
    # We attempt to get the score for the 'ungrounded' entry
    ungrounded_score = grounding_dict.get('ungrounded', 1.0)
    # Now we check if each scored match has a corresponding Adeft
    # grounding and score. If we find one, we multiply the original
    # match score with the Adeft score. Otherwise, we multiply the
    # original score with the 'ungrounded' score given by Adeft.
    for match in scored_matches:
        has_adeft_grounding = False
        for grounding, score in grounding_dict.items():
            # There is a corner case here where grounding is
            # some name other than 'ungrounded' but is not a proper
            # ns:id pair.
            if grounding == 'ungrounded' or ':' not in grounding:
                continue
            db, id = grounding.split(':', maxsplit=1)
            if match.term.db == db and match.term.id == id:
                match.disambiguation = {'type': 'adeft',
                                        'score': score,
                                        'match': 'grounded'}
                match.multiply(score)
                has_adeft_grounding = True
                break
        if not has_adeft_grounding:
            match.disambiguation = {'type': 'adeft',
                                    'score': ungrounded_score,
                                    'match': 'ungrounded'}
            match.multiply(ungrounded_score)
    return scored_matches
(self, raw_str, scored_matches, context)
714,408
gilda.grounder
disambiguate_gilda
null
def disambiguate_gilda(self, raw_str, scored_matches, context):
    res = self.gilda_disambiguators[raw_str].predict_proba([context])
    if not res:
        raise ValueError('No result from disambiguation.')
    grounding_dict = res[0]
    for match in scored_matches:
        key = '%s:%s' % (match.term.db, match.term.id)
        score_entry = grounding_dict.get(key, None)
        score = score_entry if score_entry is not None else 0.0
        match.disambiguation = {'type': 'gilda',
                                'score': score,
                                'match': ('grounded'
                                          if score_entry is not None
                                          else 'ungrounded')}
        match.multiply(score)
    return scored_matches
(self, raw_str, scored_matches, context)
714,409
gilda.grounder
get_ambiguities
Return a list of ambiguous term groups in the grounder.

Parameters
----------
skip_names :
    If True, groups of terms where one has the "name" status are skipped.
    This makes sense usually since these are prioritized over synonyms anyway.
skip_curated :
    If True, groups of terms where one has the "curated" status are skipped.
    This makes sense usually since these are prioritized over synonyms anyway.
skip_name_matches :
    If True, groups of terms that all share the same standard name are
    skipped. This is effective at eliminating spurious ambiguities due to
    unresolved cross-references between equivalent terms in different
    namespaces.
skip_species_ambigs :
    If True, groups of terms that are all genes or proteins, and are all
    from different species (one term from each species) are skipped. This is
    effective at eliminating ambiguities between orthologous genes in
    different species that are usually resolved using the organism priority
    list.
def get_ambiguities(self,
                    skip_names: bool = True,
                    skip_curated: bool = True,
                    skip_name_matches: bool = True,
                    skip_species_ambigs: bool = True) -> List[List[Term]]:
    """Return a list of ambiguous term groups in the grounder.

    Parameters
    ----------
    skip_names :
        If True, groups of terms where one has the "name" status are
        skipped. This makes sense usually since these are prioritized
        over synonyms anyway.
    skip_curated :
        If True, groups of terms where one has the "curated" status are
        skipped. This makes sense usually since these are prioritized
        over synonyms anyway.
    skip_name_matches :
        If True, groups of terms that all share the same standard name
        are skipped. This is effective at eliminating spurious
        ambiguities due to unresolved cross-references between
        equivalent terms in different namespaces.
    skip_species_ambigs :
        If True, groups of terms that are all genes or proteins, and are
        all from different species (one term from each species) are
        skipped. This is effective at eliminating ambiguities between
        orthologous genes in different species that are usually resolved
        using the organism priority list.
    """
    ambig_entries = defaultdict(list)
    for terms in self.entries.values():
        for term in terms:
            # We consider it an ambiguity if the same text entry appears
            # multiple times
            key = term.text
            ambig_entries[key].append(term)
    # It's only an ambiguity if there are two entries at least
    ambig_entries = {k: v for k, v in ambig_entries.items()
                     if len(v) >= 2}
    ambigs = []
    for text, entries in ambig_entries.items():
        dbs = {e.db for e in entries}
        db_ids = {(e.db, e.id) for e in entries}
        statuses = {e.status for e in entries}
        sources = {e.source for e in entries}
        names = {e.entry_name for e in entries}
        # If the entries all point to the same ID, we skip it
        if len(db_ids) <= 1:
            continue
        # If there is a name in statuses, we skip it because it's
        # prioritized
        if skip_names and 'name' in statuses:
            continue
        # We skip curated terms because they are prioritized anyway
        if skip_curated and 'curated' in statuses:
            continue
        # If there is an adeft model already, we skip it
        if 'adeft' in sources:
            continue
        if skip_name_matches:
            if len({e.entry_name.lower() for e in entries}) == 1:
                continue
        if skip_species_ambigs:
            if dbs <= {'HGNC', 'UP'} and \
                    len({e.organism for e in entries}) == len(entries):
                continue
        # Everything else is an ambiguity
        ambigs.append(entries)
    return ambigs
(self, skip_names: bool = True, skip_curated: bool = True, skip_name_matches: bool = True, skip_species_ambigs: bool = True) -> List[List[gilda.term.Term]]
714,410
biolexica.api
get_best_match
Get the best match in Biolexica's format.
def get_best_match(
    self,
    s: str,
    context: Optional[str] = None,
    organisms: Optional[List[str]] = None,
    namespaces: Optional[List[str]] = None,
) -> Optional[Match]:
    """Get the best match in Biolexica's format."""
    scored_matches = super().ground(
        s, context=context, organisms=organisms, namespaces=namespaces
    )
    if not scored_matches:
        return None
    return Match.from_gilda(scored_matches[0])
(self, s: str, context: Optional[str] = None, organisms: Optional[List[str]] = None, namespaces: Optional[List[str]] = None) -> Optional[biolexica.api.Match]
714,411
biolexica.api
get_matches
Get matches in Biolexica's format.
def get_matches(
    self,
    s: str,
    context: Optional[str] = None,
    organisms: Optional[List[str]] = None,
    namespaces: Optional[List[str]] = None,
) -> List[Match]:
    """Get matches in Biolexica's format."""
    return [
        Match.from_gilda(scored_match)
        for scored_match in super().ground(
            s, context=context, organisms=organisms, namespaces=namespaces
        )
    ]
(self, s: str, context: Optional[str] = None, organisms: Optional[List[str]] = None, namespaces: Optional[List[str]] = None) -> List[biolexica.api.Match]
714,412
gilda.grounder
get_models
Return a list of entity texts for which disambiguation models exist.

Returns
-------
list[str]
    The list of entity texts for which a disambiguation model is available.
def get_models(self):
    """Return a list of entity texts for which disambiguation models exist.

    Returns
    -------
    list[str]
        The list of entity texts for which a disambiguation model is
        available.
    """
    if self.gilda_disambiguators is None:
        self.gilda_disambiguators = load_gilda_models()
    return sorted(list(self.gilda_disambiguators.keys()))
(self)
714,413
gilda.grounder
get_names
Return a list of entity texts corresponding to a given database ID.

Parameters
----------
db : str
    The database in which the ID is an entry, e.g., HGNC.
id : str
    The ID of an entry in the database.
status : Optional[str]
    If given, only entity texts with the given status e.g., "synonym" are
    returned.
source : Optional[str]
    If given, only entity texts from the given source e.g., "uniprot" are
    returned.

Returns
-------
names: list[str]
    A list of entity texts corresponding to the given database/ID
def get_names(self, db, id, status=None, source=None):
    """Return a list of entity texts corresponding to a given database ID.

    Parameters
    ----------
    db : str
        The database in which the ID is an entry, e.g., HGNC.
    id : str
        The ID of an entry in the database.
    status : Optional[str]
        If given, only entity texts with the given status e.g., "synonym"
        are returned.
    source : Optional[str]
        If given, only entity texts from the given source e.g., "uniprot"
        are returned.

    Returns
    -------
    names: list[str]
        A list of entity texts corresponding to the given database/ID
    """
    names = set()
    for entries in self.entries.values():
        for entry in entries:
            if (entry.db == db) and (entry.id == id) and \
                    (not status or entry.status == status) and \
                    (not source or entry.source == source):
                names.add(entry.text)
    return sorted(names)
(self, db, id, status=None, source=None)
714,414
gilda.grounder
ground
Return scored groundings for a given raw string.

Parameters
----------
raw_str : str
    A string to be grounded with respect to the set of Terms that the
    Grounder contains.
context : Optional[str]
    Any additional text that serves as context for disambiguating the
    given entity text, used if a model exists for disambiguating the given
    text.
organisms : Optional[List[str]]
    An optional list of organism identifiers defining a priority ranking
    among organisms, if genes/proteins from multiple organisms match the
    input. If not provided, the default ['9606'] i.e., human is used.
namespaces : Optional[List[str]]
    A list of namespaces to restrict matches to. This will apply to both
    the primary namespace of a matched term, to any subsumed matches, and
    to the source namespaces of terms if they were created using
    cross-reference mappings. By default, no restriction is applied.

Returns
-------
list[gilda.grounder.ScoredMatch]
    A list of ScoredMatch objects representing the groundings sorted by
    decreasing score.
def ground(self, raw_str, context=None, organisms=None, namespaces=None):
    """Return scored groundings for a given raw string.

    Parameters
    ----------
    raw_str : str
        A string to be grounded with respect to the set of Terms that the
        Grounder contains.
    context : Optional[str]
        Any additional text that serves as context for disambiguating the
        given entity text, used if a model exists for disambiguating the
        given text.
    organisms : Optional[List[str]]
        An optional list of organism identifiers defining a priority
        ranking among organisms, if genes/proteins from multiple organisms
        match the input. If not provided, the default ['9606'] i.e.,
        human is used.
    namespaces : Optional[List[str]]
        A list of namespaces to restrict matches to. This will apply to
        both the primary namespace of a matched term, to any subsumed
        matches, and to the source namespaces of terms if they were
        created using cross-reference mappings. By default, no
        restriction is applied.

    Returns
    -------
    list[gilda.grounder.ScoredMatch]
        A list of ScoredMatch objects representing the groundings sorted
        by decreasing score.
    """
    if not organisms:
        organisms = ['9606']
    # Stripping whitespaces is done up front directly on the raw string
    # so that all lookups and comparisons are done with respect to the
    # stripped string
    raw_str = raw_str.strip()
    # Initial lookup of all possible matches
    entries = self.lookup(raw_str)
    logger.debug('Filtering %d entries by organism' % len(entries))
    entries = filter_for_organism(entries, organisms)
    logger.debug('Comparing %s with %d entries' % (raw_str, len(entries)))
    # For each entry to compare to, we generate a match data structure
    # describing the comparison of the raw (unnormalized) input string
    # and the entity text corresponding to the matched Term. This match
    # is then further scored to account for the nature of the grounding
    # itself.
    scored_matches = []
    for term in entries:
        match = generate_match(raw_str, term.text)
        sc = score(match, term)
        scored_match = ScoredMatch(term, sc, match)
        scored_matches.append(scored_match)
    # Return early if we don't have anything to avoid calling other
    # functions with no matches
    if not scored_matches:
        return scored_matches
    # Merge equivalent matches
    unique_scores = self._merge_equivalent_matches(scored_matches)
    # If there's context available, disambiguate based on that
    if context:
        unique_scores = self.disambiguate(raw_str, unique_scores, context)
    # Then sort by decreasing score
    rank_fun = lambda x: (x.score, self._score_namespace(x.term))
    unique_scores = sorted(unique_scores, key=rank_fun, reverse=True)
    # If we have a namespace constraint, we filter to the given
    # namespaces.
    if namespaces:
        unique_scores = [
            scored_match
            for scored_match in unique_scores
            if scored_match.get_namespaces() & set(namespaces)
        ]
    return unique_scores
(self, raw_str, context=None, organisms=None, namespaces=None)
714,415
gilda.grounder
ground_best
Return the best scored grounding for a given raw string.

Parameters
----------
raw_str : str
    A string to be grounded with respect to the set of Terms that the
    Grounder contains.
context : Optional[str]
    Any additional text that serves as context for disambiguating the
    given entity text, used if a model exists for disambiguating the given
    text.
organisms : Optional[List[str]]
    An optional list of organism identifiers defining a priority ranking
    among organisms, if genes/proteins from multiple organisms match the
    input. If not provided, the default ['9606'] i.e., human is used.
namespaces : Optional[List[str]]
    A list of namespaces to restrict matches to. This will apply to both
    the primary namespace of a matched term, to any subsumed matches, and
    to the source namespaces of terms if they were created using
    cross-reference mappings. By default, no restriction is applied.

Returns
-------
Optional[gilda.grounder.ScoredMatch]
    The best ScoredMatch returned by :meth:`ground` if any are returned,
    otherwise None.
def ground_best(
    self,
    raw_str: str,
    context: Optional[str] = None,
    organisms: Optional[List[str]] = None,
    namespaces: Optional[List[str]] = None,
) -> Optional["ScoredMatch"]:
    """Return the best scored grounding for a given raw string.

    Parameters
    ----------
    raw_str : str
        A string to be grounded with respect to the set of Terms that the
        Grounder contains.
    context : Optional[str]
        Any additional text that serves as context for disambiguating the
        given entity text, used if a model exists for disambiguating the
        given text.
    organisms : Optional[List[str]]
        An optional list of organism identifiers defining a priority
        ranking among organisms, if genes/proteins from multiple organisms
        match the input. If not provided, the default ['9606'] i.e.,
        human is used.
    namespaces : Optional[List[str]]
        A list of namespaces to restrict matches to. This will apply to
        both the primary namespace of a matched term, to any subsumed
        matches, and to the source namespaces of terms if they were
        created using cross-reference mappings. By default, no
        restriction is applied.

    Returns
    -------
    Optional[gilda.grounder.ScoredMatch]
        The best ScoredMatch returned by :meth:`ground` if any are
        returned, otherwise None.
    """
    scored_matches = self.ground(
        raw_str=raw_str,
        context=context,
        organisms=organisms,
        namespaces=namespaces,
    )
    if scored_matches:
        # Because of the way the ground() function is implemented,
        # the first element is guaranteed to have the best score
        # (after filtering by namespace)
        return scored_matches[0]
    return None
(self, raw_str: str, context: Optional[str] = None, organisms: Optional[List[str]] = None, namespaces: Optional[List[str]] = None) -> Optional[gilda.grounder.ScoredMatch]
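A short usage sketch of the grounding API above; the example string and context are illustrative:

from gilda import Grounder

grounder = Grounder()  # loads the default grounding terms
match = grounder.ground_best("ER", context="... the estrogen receptor ...")
if match is not None:
    print(match.term.db, match.term.id, match.score)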
714,416
gilda.grounder
lookup
Return matching Terms for a given raw string.

Parameters
----------
raw_str :
    A string to be looked up in the set of Terms that the Grounder
    contains.

Returns
-------
:
    A list of Terms that are potential matches for the given string.
def lookup(self, raw_str: str) -> List[Term]:
    """Return matching Terms for a given raw string.

    Parameters
    ----------
    raw_str :
        A string to be looked up in the set of Terms that the Grounder
        contains.

    Returns
    -------
    :
        A list of Terms that are potential matches for the given string.
    """
    lookups = self._generate_lookups(raw_str)
    entries = []
    for lookup in lookups:
        entries += self.entries.get(lookup, [])
    return entries
(self, raw_str: str) -> List[gilda.term.Term]
714,417
gilda.grounder
print_summary
Print the summary of this grounder.
def print_summary(self, **kwargs) -> None:
    """Print the summary of this grounder."""
    print(self.summary_str(), **kwargs)
(self, **kwargs) -> NoneType
714,418
gilda.grounder
summary_str
Summarize the contents of the grounder.
def summary_str(self) -> str:
    """Summarize the contents of the grounder."""
    namespaces = {ns for term in self._iter_terms() for ns in term.get_namespaces()}
    status_counter = dict(Counter(term.status for term in self._iter_terms()))
    return dedent(f"""\
    Lookups: {len(self.entries):,}
    Terms: {sum(len(terms) for terms in self.entries.values()):,}
    Term Namespaces: {namespaces}
    Term Statuses: {status_counter}
    Adeft Disambiguators: {len(self.adeft_disambiguators):,}
    Gilda Disambiguators: {len(self.gilda_disambiguators):,}
    """)
(self) -> str
714,419
biolexica.api
Input
An input towards lexicon assembly.
class Input(BaseModel):
    """An input towards lexicon assembly."""

    processor: Processor
    source: str
    ancestors: Union[None, str, List[str]] = None
    kwargs: Optional[Dict[str, Any]] = None
(*, processor: Literal['pyobo', 'bioontologies', 'biosynonyms', 'gilda'], source: str, ancestors: Union[NoneType, str, List[str]] = None, kwargs: Optional[Dict[str, Any]] = None) -> None
714,448
biolexica.api
Match
Model a scored match from Gilda.
class Match(BaseModel):
    """Model a scored match from Gilda."""

    reference: Reference
    name: str
    score: float

    @property
    def curie(self) -> str:
        """Get the reference's curie."""
        return self.reference.curie

    @classmethod
    def from_gilda(cls, scored_match: gilda.ScoredMatch):
        """Construct a match from a Gilda object."""
        return cls(
            reference=Reference(prefix=scored_match.term.db, identifier=scored_match.term.id),
            name=scored_match.term.entry_name,
            score=round(scored_match.score, 4),
        )
(*, reference: curies.api.Reference, name: str, score: float) -> None
714,478
biolexica.api
assemble_terms
Assemble terms from multiple resources.
def assemble_terms(
    configuration: Configuration,
    mappings: Optional[List["semra.Mapping"]] = None,
    *,
    extra_terms: Optional[List["gilda.Term"]] = None,
    include_biosynonyms: bool = True,
    raw_path: Optional[Path] = None,
    processed_path: Optional[Path] = None,
) -> List[gilda.Term]:
    """Assemble terms from multiple resources."""
    terms: List[gilda.Term] = []
    for inp in configuration.inputs:
        if inp.processor in {"pyobo", "bioontologies"}:
            terms.extend(
                iter_terms_by_prefix(
                    inp.source,
                    ancestors=inp.ancestors,
                    processor=inp.processor,
                    **(inp.kwargs or {}),
                )
            )
        elif inp.processor == "biosynonyms":
            terms.extend(s.as_gilda_term() for s in biosynonyms.parse_synonyms(inp.source))
        elif inp.processor == "gilda":
            terms.extend(load_entries_from_terms_file(inp.source))
        else:
            raise ValueError(f"Unknown processor {inp.processor}")

    if extra_terms:
        terms.extend(extra_terms)

    if include_biosynonyms:
        terms.extend(biosynonyms.get_gilda_terms())

    if raw_path is not None:
        logger.info("Writing %d raw terms to %s", len(terms), raw_path)
        gilda.term.dump_terms(terms, raw_path)

    if mappings is not None:
        from semra.gilda_utils import update_terms

        terms = update_terms(terms, mappings)

    if configuration.excludes:
        _excludes_set = set(configuration.excludes)
        terms = [term for term in terms if _term_curie(term) not in _excludes_set]

    if processed_path is not None:
        logger.info("Writing %d processed terms to %s", len(terms), processed_path)
        gilda.term.dump_terms(terms, processed_path)

    return terms
(configuration: biolexica.api.Configuration, mappings: Optional[List[ForwardRef('semra.Mapping')]] = None, *, extra_terms: Optional[List[ForwardRef('gilda.Term')]] = None, include_biosynonyms: bool = True, raw_path: Optional[pathlib.Path] = None, processed_path: Optional[pathlib.Path] = None) -> List[gilda.term.Term]
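A minimal assembly sketch. The Configuration contents here are hypothetical, and Configuration may require fields beyond inputs that are not shown in this entry:

from pathlib import Path
from biolexica.api import Configuration, Input, assemble_terms

configuration = Configuration(inputs=[Input(processor="pyobo", source="mesh")])
terms = assemble_terms(configuration, processed_path=Path("terms.tsv.gz"))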
714,479
biolexica.api
get_mesh_category_curies
Get the MeSH LUIDs for a category, by letter (e.g., "A").
def get_mesh_category_curies(letter, skip=None) -> List[str]:
    """Get the MeSH LUIDs for a category, by letter (e.g., "A")."""
    # see https://meshb.nlm.nih.gov/treeView
    import bioversions
    from pyobo.sources.mesh import get_tree_to_mesh_id

    mesh_version = bioversions.get_version("mesh")
    if mesh_version is None:
        raise ValueError
    tree_to_mesh = get_tree_to_mesh_id(mesh_version)
    rv = []
    for i in range(1, 100):
        key = f"{letter}{i:02}"
        if skip and key in skip:
            continue
        mesh_id = tree_to_mesh.get(key)
        if mesh_id is None:
            break
        rv.append(f"mesh:{mesh_id}")
    return rv
(letter, skip=None) -> List[str]
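For example, anatomy headings live under MeSH tree letter "A"; the skip keys below are hypothetical and only show the intended call shape:

curies = get_mesh_category_curies("A", skip=["A18", "A19"])
# one "mesh:..." CURIE per top-level tree number that exists, in order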
714,480
biolexica.api
iter_terms_by_prefix
Iterate over all terms from a given prefix.
def iter_terms_by_prefix(
    prefix: str, *, ancestors: Union[None, str, List[str]] = None, processor: Processor, **kwargs
) -> Iterable[gilda.Term]:
    """Iterate over all terms from a given prefix."""
    if processor == "pyobo":
        if ancestors is None:
            import pyobo.gilda_utils

            yield from pyobo.gilda_utils.get_gilda_terms(prefix, **kwargs)
        else:
            yield from _get_pyobo_subset_terms(prefix, ancestors, **kwargs)
    elif processor == "bioontologies":
        if ancestors is None:
            import bioontologies.gilda_utils

            yield from bioontologies.gilda_utils.get_gilda_terms(prefix, **kwargs)
        else:
            yield from _get_bioontologies_subset_terms(prefix, ancestors, **kwargs)
    else:
        raise ValueError(f"Unknown processor: {processor}")
(prefix: str, *, ancestors: Union[NoneType, str, List[str]] = None, processor: Literal['pyobo', 'bioontologies', 'biosynonyms', 'gilda'], **kwargs) -> Iterable[gilda.term.Term]
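Usage sketch (illustrative prefix; the pyobo branch requires pyobo to be installed, and passing ancestors restricts iteration to a subset of the vocabulary):

terms = list(iter_terms_by_prefix("doid", processor="pyobo"))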
714,481
biolexica.api
load_grounder
Load a gilda grounder, potentially from a remote location.
def load_grounder(grounder: GrounderHint) -> Grounder:
    """Load a gilda grounder, potentially from a remote location."""
    if isinstance(grounder, str):
        if grounder in PREDEFINED:
            if LEXICA.is_dir():
                # If biolexica is installed in editable mode, try looking for
                # the directory outside the package root and load the predefined
                # index directly
                grounder = LEXICA.joinpath(grounder, "terms.tsv.gz").as_posix()
            else:
                grounder = URL_FMT.format(key=grounder)
        if grounder.startswith("http"):
            with tempfile.TemporaryDirectory() as directory:
                path = Path(directory).joinpath("terms.tsv.gz")
                urlretrieve(grounder, path)  # noqa:S310
                return Grounder(path)
    if isinstance(grounder, (str, Path)):
        path = Path(grounder).resolve()
        if not path.is_file():
            raise FileNotFoundError(path)
        return Grounder(grounder)
    if isinstance(grounder, Grounder):
        return grounder
    if isinstance(grounder, gilda.Grounder):
        return Grounder(grounder.entries)
    raise TypeError
(grounder: Union[gilda.grounder.Grounder, str, pathlib.Path]) -> biolexica.api.Grounder
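A loading sketch. The key "phenotype" is assumed to be one of the PREDEFINED lexica; a local path or an http(s) URL to a terms.tsv.gz file works the same way:

from biolexica.api import load_grounder

grounder = load_grounder("phenotype")  # assumed predefined key
scored_matches = grounder.ground("alzheimer's disease")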
714,485
pylint_django.plugin
load_configuration
Amend existing checker config.
def load_configuration(linter):
    """
    Amend existing checker config.
    """
    linter.config.good_names += (
        "pk",
        "qs",
        "urlpatterns",
        "register",
        "app_name",
        "handler400",
        "handler403",
        "handler404",
        "handler500",
    )

    # we don't care about South migrations
    linter.config.black_list += ("migrations", "south_migrations")
(linter)
714,487
pylint_django.plugin
register
Registering additional checkers.
def register(linter):
    """
    Registering additional checkers.
    """
    # add all of the checkers
    register_checkers(linter)

    # register any checking fiddlers
    try:
        # pylint: disable=import-outside-toplevel
        from pylint_django.augmentations import apply_augmentations

        apply_augmentations(linter)
    except ImportError:
        # probably trying to execute pylint_django when Django isn't installed
        # in this case the django-not-installed checker will kick-in
        pass

    if not compat.LOAD_CONFIGURATION_SUPPORTED:
        load_configuration(linter)
(linter)
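pylint invokes register() when the plugin is enabled, for example from the command line or a config file:

pylint --load-plugins pylint_django myproject/

# or equivalently in .pylintrc:
# [MASTER]
# load-plugins=pylint_django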
714,491
rcssmin
_as_str
Make sure the style is a text string
def _as_str(script):
    """ Make sure the style is a text string """
    is_bytes = False
    if str is bytes:
        if not isinstance(script, basestring):  # noqa pylint: disable = undefined-variable
            raise TypeError("Unexpected type")
    elif isinstance(script, bytes):
        is_bytes = True
        script = script.decode('latin-1')
    elif isinstance(script, bytearray):
        is_bytes = 2
        script = script.decode('latin-1')
    elif not isinstance(script, str):
        raise TypeError("Unexpected type")
    return is_bytes, script
(script)
714,492
rcssmin
_make_cssmin
Generate CSS minifier.

Parameters:
  python_only (bool):
    Use only the python variant. If true, the c extension is not
    even tried to be loaded.

Returns:
  callable: Minifier
def _make_cssmin(python_only=False):
    """
    Generate CSS minifier.

    Parameters:
      python_only (bool):
        Use only the python variant. If true, the c extension is not
        even tried to be loaded.

    Returns:
      callable: Minifier
    """
    # pylint: disable = unused-variable, possibly-unused-variable
    # pylint: disable = too-many-locals, too-many-statements

    if not python_only:
        try:
            import _rcssmin  # pylint: disable = import-outside-toplevel
        except ImportError:
            pass
        else:
            # Ensure that the C version is in sync
            if getattr(_rcssmin, '__version__', None) == __version__:
                return _rcssmin.cssmin

    nl = r'(?:[\n\f]|\r\n?)'  # pylint: disable = invalid-name
    spacechar = r'[\r\n\f\040\t]'

    unicoded = r'[0-9a-fA-F]{1,6}(?:[\040\n\t\f]|\r\n?)?'
    escaped = r'[^\n\r\f0-9a-fA-F]'
    escape = r'(?:\\(?:%(unicoded)s|%(escaped)s))' % locals()

    nmchar = r'[^\000-\054\056\057\072-\100\133-\136\140\173-\177]'
    # nmstart = r'[^\000-\100\133-\136\140\173-\177]'
    # ident = (r'(?:'
    #     r'-?(?:%(nmstart)s|%(escape)s)%(nmchar)s*(?:%(escape)s%(nmchar)s*)*'
    # r')') % locals()

    comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'

    # only for specific purposes. The bang is grouped:
    _bang_comment = r'(?:/\*(!?)[^*]*\*+(?:[^/*][^*]*\*+)*/)'

    string1 = \
        r'(?:\047[^\047\\\r\n\f]*(?:\\[^\r\n\f][^\047\\\r\n\f]*)*\047)'
    string2 = r'(?:"[^"\\\r\n\f]*(?:\\[^\r\n\f][^"\\\r\n\f]*)*")'
    strings = r'(?:%s|%s)' % (string1, string2)

    nl_string1 = \
        r'(?:\047[^\047\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^\047\\\r\n\f]*)*\047)'
    nl_string2 = r'(?:"[^"\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^"\\\r\n\f]*)*")'
    nl_strings = r'(?:%s|%s)' % (nl_string1, nl_string2)

    uri_nl_string1 = r'(?:\047[^\047\\]*(?:\\(?:[^\r]|\r\n?)[^\047\\]*)*\047)'
    uri_nl_string2 = r'(?:"[^"\\]*(?:\\(?:[^\r]|\r\n?)[^"\\]*)*")'
    uri_nl_strings = r'(?:%s|%s)' % (uri_nl_string1, uri_nl_string2)

    nl_escaped = r'(?:\\%(nl)s)' % locals()

    space = r'(?:%(spacechar)s|%(comment)s)' % locals()

    ie7hack = r'(?:>/\*\*/)'

    uri = (
        # noqa pylint: disable = bad-option-value, bad-continuation
        r'(?:'
            r'(?:[^\000-\040"\047()\\\177]*'
                r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*)'
            r'(?:'
                r'(?:%(spacechar)s+|%(nl_escaped)s+)'
                r'(?:'
                    r'(?:[^\000-\040"\047()\\\177]|%(escape)s|%(nl_escaped)s)'
                    r'[^\000-\040"\047()\\\177]*'
                    r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*'
                r')+'
            r')*'
        r')'
    ) % locals()

    nl_unesc_sub = _re.compile(nl_escaped).sub

    uri_data_plain = _re.compile((
        r'[\047"][dD][aA][tT][aA]:([^\000-\040\\"\047,]*),'
    )).match
    uri_space_sub = _re.compile((
        r'(%(escape)s+)|%(spacechar)s+|%(nl_escaped)s+'
    ) % locals()).sub
    uri_space_subber = lambda m: m.groups()[0] or ''

    space_sub_simple = _re.compile((
        r'[\r\n\f\040\t;]+|(%(comment)s+)'
    ) % locals()).sub
    space_sub_banged = _re.compile((
        r'[\r\n\f\040\t;]+|(%(_bang_comment)s+)'
    ) % locals()).sub

    post_esc_sub = _re.compile(r'[\r\n\f\t]+').sub

    main_sub = _re.compile((
        # noqa pylint: disable = bad-option-value, bad-continuation
        r'([^\\"\047u>@\r\n\f\040\t/;:{}+]+)'             # 1
        r'|(?<=[{}(=:>[,!])(%(space)s+)'                  # 2
        r'|^(%(space)s+)'                                 # 3
        r'|(%(space)s+)(?=(([:{});=>\],!])|$)?)'          # 4, 5, 6
        r'|;(%(space)s*(?:;%(space)s*)*)(?=(\})?)'        # 7, 8
        r'|(\{)'                                          # 9
        r'|(\})'                                          # 10
        r'|(%(strings)s)'                                 # 11
        r'|(?<!%(nmchar)s)url\(%(spacechar)s*('           # 12
            r'%(uri_nl_strings)s'
            r'|%(uri)s'
        r')%(spacechar)s*\)'
        r'|(@(?:'                                         # 13
            r'[mM][eE][dD][iI][aA]'
            r'|[sS][uU][pP][pP][oO][rR][tT][sS]'
            r'|[dD][oO][cC][uU][mM][eE][nN][tT]'
            r'|(?:-(?:'
                r'[wW][eE][bB][kK][iI][tT]|[mM][oO][zZ]|[oO]|[mM][sS]'
            r')-)?'
            r'[kK][eE][yY][fF][rR][aA][mM][eE][sS]'
        r'))(?!%(nmchar)s)'
        r'|(%(ie7hack)s)(%(space)s*)'                     # 14, 15
        r'|(:[fF][iI][rR][sS][tT]-[lL]'                   # 16
            r'(?:[iI][nN][eE]|[eE][tT][tT][eE][rR]))'
            r'(%(space)s*)(?=[{,])'                       # 17
        r'|(%(nl_strings)s)'                              # 18
        r'|(%(escape)s[^\\"\047u>@\r\n\f\040\t/;:{}+]*)'  # 19
    ) % locals()).sub

    # print(main_sub.__self__.pattern)

    def main_subber(keep_bang_comments):
        """ Make main subber """
        in_macie5, in_rule, at_group = [0], [0], [0]

        if keep_bang_comments:
            space_sub = space_sub_banged

            def space_subber(match):
                """ Space|Comment subber """
                if match.lastindex:
                    group1, group2 = match.group(1, 2)
                    if group2:
                        if group1.endswith(r'\*/'):
                            in_macie5[0] = 1
                        else:
                            in_macie5[0] = 0
                        return group1
                    if group1.endswith(r'\*/'):
                        if in_macie5[0]:
                            return ''
                        in_macie5[0] = 1
                        return r'/*\*/'
                    elif in_macie5[0]:
                        in_macie5[0] = 0
                        return '/**/'
                return ''
        else:
            space_sub = space_sub_simple

            def space_subber(match):
                """ Space|Comment subber """
                if match.lastindex:
                    if match.group(1).endswith(r'\*/'):
                        if in_macie5[0]:
                            return ''
                        in_macie5[0] = 1
                        return r'/*\*/'
                    elif in_macie5[0]:
                        in_macie5[0] = 0
                        return '/**/'
                return ''

        def fn_space_post(group):
            """ space with token after """
            if group(5) is None or (
                    group(6) == ':' and not in_rule[0] and not at_group[0]):
                return ' ' + space_sub(space_subber, group(4))
            return space_sub(space_subber, group(4))

        def fn_semicolon(group):
            """ ; handler """
            return ';' + space_sub(space_subber, group(7))

        def fn_semicolon2(group):
            """ ; handler """
            if in_rule[0]:
                return space_sub(space_subber, group(7))
            return ';' + space_sub(space_subber, group(7))

        def fn_open(_):
            """ { handler """
            if at_group[0]:
                at_group[0] -= 1
            else:
                in_rule[0] = 1
            return '{'

        def fn_close(_):
            """ } handler """
            in_rule[0] = 0
            return '}'

        def fn_url(group):
            """ url() handler """
            uri = group(12)
            data = uri_data_plain(uri)
            if not data or data.group(1).lower().endswith(';base64'):
                uri = uri_space_sub(uri_space_subber, uri)
            return 'url(%s)' % (uri,)

        def fn_at_group(group):
            """ @xxx group handler """
            at_group[0] += 1
            return group(13)

        def fn_ie7hack(group):
            """ IE7 Hack handler """
            if not in_rule[0] and not at_group[0]:
                in_macie5[0] = 0
                return group(14) + space_sub(space_subber, group(15))
            return '>' + space_sub(space_subber, group(15))

        table = (
            # noqa pylint: disable = bad-option-value, bad-continuation
            None,
            None,
            None,
            None,
            fn_space_post,     # space with token after
            fn_space_post,     # space with token after
            fn_space_post,     # space with token after
            fn_semicolon,      # semicolon
            fn_semicolon2,     # semicolon
            fn_open,           # {
            fn_close,          # }
            lambda g: g(11),   # string
            fn_url,            # url(...)
            fn_at_group,       # @xxx expecting {...}
            None,
            fn_ie7hack,        # ie7hack
            None,
            lambda g: g(16) + ' ' + space_sub(space_subber, g(17)),
                               # :first-line|letter followed
                               # by [{,] (apparently space
                               # needed for IE6)
            lambda g: nl_unesc_sub('', g(18)),   # nl_string
            lambda g: post_esc_sub(' ', g(19)),  # escape
        )

        def func(match):
            """ Main subber """
            idx, group = match.lastindex, match.group
            if idx > 3:
                return table[idx](group)

            # shortcuts for frequent operations below:
            elif idx == 1:  # not interesting
                return group(1)
            # else: # space with token before or at the beginning
            return space_sub(space_subber, group(idx))

        return func

    def cssmin(style, keep_bang_comments=False):
        """
        Minify CSS.

        Parameters:
          style (str):
            CSS to minify

          keep_bang_comments (bool):
            Keep comments starting with an exclamation mark? (``/*!...*/``)

        Returns:
          str: Minified style
        """
        # pylint: disable = redefined-outer-name
        is_bytes, style = _as_str(style)
        style = main_sub(main_subber(keep_bang_comments), style)
        if is_bytes:
            style = style.encode('latin-1')
            if is_bytes == 2:
                style = bytearray(style)
        return style

    return cssmin
(python_only=False)
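The minifier returned here is exposed at module level as rcssmin.cssmin; a small usage sketch:

import rcssmin

minified = rcssmin.cssmin("a { color: #ff0000; }  /*! license */", keep_bang_comments=True)
# whitespace and the semicolon before '}' are dropped; the bang comment survives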
714,497
webp
WebPAnimDecoder
null
class WebPAnimDecoder:
    def __init__(self, ptr, dec_opts, anim_info):
        self.ptr = ptr
        self.dec_opts = dec_opts
        self.anim_info = anim_info

    def __del__(self):
        lib.WebPAnimDecoderDelete(self.ptr)

    def has_more_frames(self):
        return lib.WebPAnimDecoderHasMoreFrames(self.ptr) != 0

    def reset(self):
        lib.WebPAnimDecoderReset(self.ptr)

    def decode_frame(self):
        """Decodes the next frame of the animation.

        Returns:
            numpy.array: The frame image.
            float: The timestamp for the end of the frame.
        """
        timestamp_ptr = ffi.new('int*')
        buf_ptr = ffi.new('uint8_t**')
        if lib.WebPAnimDecoderGetNext(self.ptr, buf_ptr, timestamp_ptr) == 0:
            raise WebPError('decoding error')
        size = self.anim_info.height * self.anim_info.width * 4
        buf = ffi.buffer(buf_ptr[0], size)
        arr = np.copy(np.frombuffer(buf, dtype=np.uint8))
        arr = np.reshape(arr, (self.anim_info.height, self.anim_info.width, 4))
        # timestamp_ms contains the _end_ time of this frame
        timestamp_ms = timestamp_ptr[0]
        return arr, timestamp_ms

    def frames(self):
        while self.has_more_frames():
            arr, timestamp_ms = self.decode_frame()
            yield arr, timestamp_ms

    @staticmethod
    def new(webp_data, dec_opts=None):
        if dec_opts is None:
            dec_opts = WebPAnimDecoderOptions.new()
        ptr = lib.WebPAnimDecoderNew(webp_data.ptr, dec_opts.ptr)
        if ptr == ffi.NULL:
            raise WebPError('failed to create decoder')
        anim_info = WebPAnimInfo.new()
        if lib.WebPAnimDecoderGetInfo(ptr, anim_info.ptr) == 0:
            raise WebPError('failed to get animation info')
        return WebPAnimDecoder(ptr, dec_opts, anim_info)
(ptr, dec_opts, anim_info)
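A decoding sketch; it assumes the module's WebPData.from_buffer constructor for wrapping raw bytes, which is not shown in these entries:

import webp

with open('anim.webp', 'rb') as f:
    webp_data = webp.WebPData.from_buffer(f.read())  # assumed constructor

dec = webp.WebPAnimDecoder.new(webp_data)
for arr, timestamp_ms in dec.frames():
    print(arr.shape, timestamp_ms)  # (height, width, 4) RGBA array, end time in ms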
714,498
webp
__del__
null
def __del__(self):
    lib.WebPAnimDecoderDelete(self.ptr)
(self)
714,499
webp
__init__
null
def __init__(self, ptr, dec_opts, anim_info):
    self.ptr = ptr
    self.dec_opts = dec_opts
    self.anim_info = anim_info
(self, ptr, dec_opts, anim_info)
714,500
webp
decode_frame
Decodes the next frame of the animation.

Returns:
    numpy.array: The frame image.
    float: The timestamp for the end of the frame.
def decode_frame(self):
    """Decodes the next frame of the animation.

    Returns:
        numpy.array: The frame image.
        float: The timestamp for the end of the frame.
    """
    timestamp_ptr = ffi.new('int*')
    buf_ptr = ffi.new('uint8_t**')
    if lib.WebPAnimDecoderGetNext(self.ptr, buf_ptr, timestamp_ptr) == 0:
        raise WebPError('decoding error')
    size = self.anim_info.height * self.anim_info.width * 4
    buf = ffi.buffer(buf_ptr[0], size)
    arr = np.copy(np.frombuffer(buf, dtype=np.uint8))
    arr = np.reshape(arr, (self.anim_info.height, self.anim_info.width, 4))
    # timestamp_ms contains the _end_ time of this frame
    timestamp_ms = timestamp_ptr[0]
    return arr, timestamp_ms
(self)
714,501
webp
frames
null
def frames(self):
    while self.has_more_frames():
        arr, timestamp_ms = self.decode_frame()
        yield arr, timestamp_ms
(self)
714,502
webp
has_more_frames
null
def has_more_frames(self):
    return lib.WebPAnimDecoderHasMoreFrames(self.ptr) != 0
(self)
714,503
webp
new
null
@staticmethod
def new(webp_data, dec_opts=None):
    if dec_opts is None:
        dec_opts = WebPAnimDecoderOptions.new()
    ptr = lib.WebPAnimDecoderNew(webp_data.ptr, dec_opts.ptr)
    if ptr == ffi.NULL:
        raise WebPError('failed to create decoder')
    anim_info = WebPAnimInfo.new()
    if lib.WebPAnimDecoderGetInfo(ptr, anim_info.ptr) == 0:
        raise WebPError('failed to get animation info')
    return WebPAnimDecoder(ptr, dec_opts, anim_info)
(webp_data, dec_opts=None)
714,504
webp
reset
null
def reset(self):
    lib.WebPAnimDecoderReset(self.ptr)
(self)
714,505
webp
WebPAnimDecoderOptions
null
class WebPAnimDecoderOptions:
    def __init__(self, ptr):
        self.ptr = ptr

    @property
    def color_mode(self):
        return WebPColorMode(self.ptr.color_mode)

    @color_mode.setter
    def color_mode(self, color_mode):
        self.ptr.color_mode = color_mode.value

    @property
    def use_threads(self):
        return self.ptr.use_threads != 0

    @use_threads.setter
    def use_threads(self, use_threads):
        self.ptr.use_threads = 1 if use_threads else 0

    @staticmethod
    def new(use_threads=False, color_mode=WebPColorMode.RGBA):
        ptr = ffi.new('WebPAnimDecoderOptions*')
        if lib.WebPAnimDecoderOptionsInit(ptr) == 0:
            raise WebPError('version mismatch')
        dec_opts = WebPAnimDecoderOptions(ptr)
        dec_opts.use_threads = use_threads
        dec_opts.color_mode = color_mode
        return dec_opts
(ptr)
714,506
webp
__init__
null
def __init__(self, ptr):
    self.ptr = ptr
(self, ptr)
714,507
webp
new
null
@staticmethod
def new(use_threads=False, color_mode=WebPColorMode.RGBA):
    ptr = ffi.new('WebPAnimDecoderOptions*')
    if lib.WebPAnimDecoderOptionsInit(ptr) == 0:
        raise WebPError('version mismatch')
    dec_opts = WebPAnimDecoderOptions(ptr)
    dec_opts.use_threads = use_threads
    dec_opts.color_mode = color_mode
    return dec_opts
(use_threads=False, color_mode=<WebPColorMode.RGBA: 1>)
714,508
webp
WebPAnimEncoder
null
class WebPAnimEncoder:
    def __init__(self, ptr, enc_opts):
        self.ptr = ptr
        self.enc_opts = enc_opts

    def __del__(self):
        lib.WebPAnimEncoderDelete(self.ptr)

    def encode_frame(self, frame, timestamp_ms, config=None):
        """Add a frame to the animation.

        Args:
            frame (WebPPicture): Frame image.
            timestamp_ms (int): When the frame should be shown (in milliseconds).
            config (WebPConfig): Encoder configuration.
        """
        if config is None:
            config = WebPConfig.new()
        if lib.WebPAnimEncoderAdd(self.ptr, frame.ptr, timestamp_ms, config.ptr) == 0:
            # error_code is numeric, so convert before concatenating
            raise WebPError('encoding error: ' + str(self.ptr.error_code))

    def assemble(self, end_timestamp_ms):
        if lib.WebPAnimEncoderAdd(self.ptr, ffi.NULL, end_timestamp_ms, ffi.NULL) == 0:
            # error_code is numeric, so convert before concatenating
            raise WebPError('encoding error: ' + str(self.ptr.error_code))
        _webp_data = _WebPData()
        if lib.WebPAnimEncoderAssemble(self.ptr, _webp_data.ptr) == 0:
            raise WebPError('error assembling animation')
        return _webp_data.done()

    @staticmethod
    def new(width, height, enc_opts=None):
        if enc_opts is None:
            enc_opts = WebPAnimEncoderOptions.new()
        ptr = lib.WebPAnimEncoderNew(width, height, enc_opts.ptr)
        return WebPAnimEncoder(ptr, enc_opts)
(ptr, enc_opts)
714,509
webp
__del__
null
def __del__(self):
    lib.WebPAnimEncoderDelete(self.ptr)
(self)
714,510
webp
__init__
null
def __init__(self, ptr, enc_opts):
    self.ptr = ptr
    self.enc_opts = enc_opts
(self, ptr, enc_opts)
714,511
webp
assemble
null
def assemble(self, end_timestamp_ms):
    if lib.WebPAnimEncoderAdd(self.ptr, ffi.NULL, end_timestamp_ms, ffi.NULL) == 0:
        # error_code is numeric, so convert before concatenating
        raise WebPError('encoding error: ' + str(self.ptr.error_code))
    _webp_data = _WebPData()
    if lib.WebPAnimEncoderAssemble(self.ptr, _webp_data.ptr) == 0:
        raise WebPError('error assembling animation')
    return _webp_data.done()
(self, end_timestamp_ms)
714,512
webp
encode_frame
Add a frame to the animation.

Args:
    frame (WebPPicture): Frame image.
    timestamp_ms (int): When the frame should be shown (in milliseconds).
    config (WebPConfig): Encoder configuration.
def encode_frame(self, frame, timestamp_ms, config=None):
    """Add a frame to the animation.

    Args:
        frame (WebPPicture): Frame image.
        timestamp_ms (int): When the frame should be shown (in milliseconds).
        config (WebPConfig): Encoder configuration.
    """
    if config is None:
        config = WebPConfig.new()
    if lib.WebPAnimEncoderAdd(self.ptr, frame.ptr, timestamp_ms, config.ptr) == 0:
        # error_code is numeric, so convert before concatenating
        raise WebPError('encoding error: ' + str(self.ptr.error_code))
(self, frame, timestamp_ms, config=None)
714,513
webp
new
null
@staticmethod
def new(width, height, enc_opts=None):
    if enc_opts is None:
        enc_opts = WebPAnimEncoderOptions.new()
    ptr = lib.WebPAnimEncoderNew(width, height, enc_opts.ptr)
    return WebPAnimEncoder(ptr, enc_opts)
(width, height, enc_opts=None)
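An encoding sketch to close the loop. WebPPicture.from_numpy and WebPData.buffer() are assumed helpers from elsewhere in this module, not shown in these entries:

import numpy as np
import webp

enc = webp.WebPAnimEncoder.new(64, 64)
frame = webp.WebPPicture.from_numpy(np.zeros((64, 64, 3), dtype=np.uint8))  # assumed constructor
enc.encode_frame(frame, timestamp_ms=0)
anim_data = enc.assemble(end_timestamp_ms=250)

with open('out.webp', 'wb') as f:
    f.write(anim_data.buffer())  # assumed accessor for the raw bytes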