input | output |
---|---|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|
import numpy as np
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
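# Illustrative install commands for the extras declared above (a sketch, assuming
# the package is published on PyPI under the name 'docarray'):
#   pip install docarray               # core dependencies only: numpy + rich
#   pip install "docarray[common]"     # adds protobuf, requests, Pillow, fastapi, ...
#   pip install "docarray[full]"       # adds all optional backends and codecs
#   pip install "docarray[qdrant]"     # adds a single document-store backend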
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar, Union, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
elif isinstance(value, list) or isinstance(value, tuple):
try:
arr_from_list: np.ndarray = np.asarray(value)
return cls.from_ndarray(arr_from_list)
except Exception:
pass # handled below
else:
try:
arr: np.ndarray = np.asarray(value)
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray compatible type, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
# this is needed to dump to json
field_schema.update(type='string', format='tensor')
def _to_json_compatible(self) -> np.ndarray:
"""
Convert tensor into a json compatible object
:return: a list representation of the tensor
"""
return self.unwrap()
def unwrap(self) -> np.ndarray:
"""
Return the original ndarray without any memory copy.
The original view remains intact and is still a Document Tensor,
while the returned object is a plain np.ndarray; both objects share
the same underlying memory.
EXAMPLE USAGE
.. code-block:: python
from docarray.typing import Tensor
import numpy as np
t1 = Tensor.validate(np.zeros((3, 224, 224)), None, None)
# here t1 is a docarray Tensor
t2 = t1.unwrap()
# here t2 is a plain np.ndarray but t1 is still a docarray Tensor
# and both share the same underlying memory
:return: a numpy ndarray
"""
return self.view(np.ndarray)
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert itself into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to be
converted into a protobuf.
:param field: field in which to store the content in the node proto
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self._flush_tensor_to_proto(nd_proto, value=self)
return NodeProto(**{field: nd_proto})
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def _flush_tensor_to_proto(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
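# Illustrative round trip (a sketch added for clarity; not part of the original module,
# and it assumes the docarray protobuf definitions are available):
#   t = Tensor.validate(np.zeros((2, 3)), None, None)   # ndarray coerced into a Tensor view
#   node = t._to_node_protobuf()                         # NodeProto wrapping an NdArrayProto
#   t2 = Tensor.from_protobuf(node.tensor)               # rebuilt from the dense buffer + shape
#   assert (t2 == t).all() and type(t.unwrap()) is np.ndarray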
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T], value: Union[T, Any], field: 'ModelField', config: 'BaseConfig'
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
else:
try:
arr: np.ndarray = np.asarray(value)
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to be
converted into a protobuf.
:param field: field in which to store the content in the node proto
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self._flush_tensor_to_proto(nd_proto, value=self)
return NodeProto(**{field: nd_proto})
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def _flush_tensor_to_proto(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
|
from __future__ import annotations
import gzip
from . import InputExample
class PairedFilesReader:
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
from __future__ import annotations
import gzip
from . import InputExample
class PairedFilesReader(object):
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
import os
from typing import Dict
from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import (
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
__version__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
If you don't want to rebuild the image on Jina Hub,
you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
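# Illustrative example (added for clarity): to_compatible_name('encoder/CLIP_Text')
# returns 'encoder-clip-text', satisfying the lowercase, dash-only naming rules
# shared by Kubernetes and docker compose.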
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
except:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
# None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
return is_valid_docker_uri(uses)
except ValueError:
return False
|
import os
from typing import Dict
from hubble.executor.helper import parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import (
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
__version__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
If you don't want to rebuild the image on Jina Hub,
you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
except:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
# None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
except ValueError:
return False
|
"""Function calling agent."""
from typing import Any, List, Optional
from llama_index.core.agent.runner.base import AgentRunner, AgentState
from llama_index.core.agent.function_calling.step import (
FunctionCallingAgentWorker,
DEFAULT_MAX_FUNCTION_CALLS,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.memory.types import BaseMemory
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
class FunctionCallingAgent(AgentRunner):
"""
Function calling agent.
Light wrapper around AgentRunner.
"""
@classmethod
def from_tools(
cls,
tools: Optional[List[BaseTool]] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
llm: Optional[FunctionCallingLLM] = None,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
chat_history: Optional[List[ChatMessage]] = None,
state: Optional[AgentState] = None,
allow_parallel_tool_calls: bool = True,
**kwargs: Any,
) -> "FunctionCallingAgent":
"""Create a FunctionCallingAgent from a list of tools."""
tools = tools or []
llm = llm or Settings.llm # type: ignore
assert isinstance(
llm, FunctionCallingLLM
), "llm must be an instance of FunctionCallingLLM"
if callback_manager is not None:
llm.callback_manager = callback_manager
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
agent_worker = FunctionCallingAgentWorker.from_tools(
tools,
tool_retriever=tool_retriever,
llm=llm,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
prefix_messages=prefix_messages,
allow_parallel_tool_calls=allow_parallel_tool_calls,
)
return cls(
agent_worker=agent_worker,
memory=memory,
chat_history=chat_history,
state=state,
llm=llm,
callback_manager=callback_manager,
verbose=verbose,
**kwargs,
)
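# Illustrative usage (a sketch; the tool and LLM below are placeholders, not part of this module):
#   from llama_index.core.tools import FunctionTool
#   from llama_index.llms.openai import OpenAI
#
#   def multiply(a: int, b: int) -> int:
#       """Multiply two integers."""
#       return a * b
#
#   agent = FunctionCallingAgent.from_tools(
#       tools=[FunctionTool.from_defaults(fn=multiply)],
#       llm=OpenAI(model="gpt-4o-mini"),  # must be a FunctionCallingLLM
#       system_prompt="You are a calculator.",
#   )
#   response = agent.chat("What is 6 times 7?")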
|
"""Function calling agent."""
from typing import Any, List, Optional
from llama_index.core.agent.runner.base import AgentRunner, AgentState
from llama_index.core.agent.function_calling.step import (
FunctionCallingAgentWorker,
DEFAULT_MAX_FUNCTION_CALLS,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.memory.types import BaseMemory
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
class FunctionCallingAgent(AgentRunner):
"""Function calling agent.
Light wrapper around AgentRunner.
"""
@classmethod
def from_tools(
cls,
tools: Optional[List[BaseTool]] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
llm: Optional[FunctionCallingLLM] = None,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
chat_history: Optional[List[ChatMessage]] = None,
state: Optional[AgentState] = None,
allow_parallel_tool_calls: bool = True,
**kwargs: Any,
) -> "FunctionCallingAgent":
"""Create a FunctionCallingAgent from a list of tools."""
tools = tools or []
llm = llm or Settings.llm # type: ignore
assert isinstance(
llm, FunctionCallingLLM
), "llm must be an instance of FunctionCallingLLM"
if callback_manager is not None:
llm.callback_manager = callback_manager
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
agent_worker = FunctionCallingAgentWorker.from_tools(
tools,
tool_retriever=tool_retriever,
llm=llm,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
prefix_messages=prefix_messages,
allow_parallel_tool_calls=allow_parallel_tool_calls,
)
return cls(
agent_worker=agent_worker,
memory=memory,
chat_history=chat_history,
state=state,
llm=llm,
callback_manager=callback_manager,
verbose=verbose,
**kwargs,
)
|
from abc import ABC, abstractmethod
import warnings
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING, Union, List, Tuple
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs,
):
self._load_offset2ids()
def _init_subindices(
self, _docs: Optional['DocumentArraySourceType'] = None, *args, **kwargs
):
self._subindices = {}
subindex_configs = kwargs.get('subindex_configs', None)
if subindex_configs:
config = asdict(self._config) if getattr(self, '_config', None) else dict()
for name, config_subindex in subindex_configs.items():
config_subindex = (
dict() if config_subindex is None else config_subindex
) # allow None as input
if is_dataclass(config_subindex):
config_subindex = asdict(config_subindex)
config_joined = {**config, **config_subindex}
config_joined = self._ensure_unique_config(
config, config_subindex, config_joined, name
)
self._subindices[name] = self.__class__(config=config_joined)
if _docs:
from docarray import DocumentArray
self._subindices[name].extend(
DocumentArray(_docs).traverse_flat(name[1:])
)
@abstractmethod
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
"""
Ensures that the subindex configuration is unique, despite it inheriting unpopulated fields from the root config.
:param config_root: The configuration of the root index.
:param config_subindex: The configuration that was explicitly provided by the user for the subindex.
:param config_joined: The configuration that combines root and subindex configs. This is the configuration that will be used for subindex construction.
:param subindex_name: Name (access path) of the subindex
:return: config_joined that is unique compared to config_root
"""
...
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(
self, columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]]
) -> Dict[str, str]:
if columns is None:
return {}
if isinstance(columns, list):
warnings.warn(
'Using "columns" as a List of Tuples will be deprecated soon. Please provide a Dictionary.'
)
columns = {col_desc[0]: col_desc[1] for col_desc in columns}
return columns
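# Illustrative note (added for clarity): passing columns as a list of tuples,
# e.g. [('price', 'float')], is accepted but triggers the deprecation warning above
# and is converted to {'price': 'float'}; passing the dict form directly is preferred.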
|
from abc import ABC, abstractmethod
import warnings
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING, Union, List, Tuple
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs,
):
self._load_offset2ids()
def _init_subindices(self, *args, **kwargs):
self._subindices = {}
subindex_configs = kwargs.get('subindex_configs', None)
if subindex_configs:
config = asdict(self._config) if getattr(self, '_config', None) else dict()
for name, config_subindex in subindex_configs.items():
config_subindex = (
dict() if config_subindex is None else config_subindex
) # allow None as input
if is_dataclass(config_subindex):
config_subindex = asdict(config_subindex)
config_joined = {**config, **config_subindex}
config_joined = self._ensure_unique_config(
config, config_subindex, config_joined, name
)
self._subindices[name] = self.__class__(config=config_joined)
self._subindices[name].extend(self.traverse_flat(name[1:]))
@abstractmethod
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
"""
Ensures that the subindex configuration is unique, despite it inheriting unpopulated fields from the root config.
:param config_root: The configuration of the root index.
:param config_subindex: The configuration that was explicitly provided by the user for the subindex.
:param config_joined: The configuration that combines root and subindex configs. This is the configuration that will be used for subindex construction.
:param subindex_name: Name (access path) of the subindex
:return: config_joined that is unique compared to config_root
"""
...
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(
self, columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]]
) -> Dict[str, str]:
if columns is None:
return {}
if isinstance(columns, list):
warnings.warn(
'Using "columns" as a List of Tuples will be deprecated soon. Please provide a Dictionary.'
)
columns = {col_desc[0]: col_desc[1] for col_desc in columns}
return columns
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
This function will perform the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
runner.message_hub.update_scalar(
'train/lr', runner.optimizer.param_groups[0]['lr'])
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
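# Illustrative configuration (a sketch; the values are placeholders): the grad_clip
# dict is forwarded verbatim to torch.nn.utils.clip_grad.clip_grad_norm_, so any of
# its keyword arguments may be used, e.g.
#   hook = OptimizerHook(grad_clip=dict(max_norm=35, norm_type=2))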
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
This function will perform the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
from __future__ import annotations
import json
from typing import TYPE_CHECKING, List, Optional, Sequence, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
if TYPE_CHECKING:
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Page as SyncPage
class GetElementsToolInput(BaseModel):
"""Input for GetElementsTool."""
selector: str = Field(
...,
description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname",
)
attributes: List[str] = Field(
default_factory=lambda: ["innerText"],
description="Set of attributes to retrieve for each element",
)
async def _aget_elements(
page: AsyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = await page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = await element.inner_text()
else:
val = await element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
def _get_elements(
page: SyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
class GetElementsTool(BaseBrowserTool):
"""Tool for getting elements in the current web page matching a CSS selector."""
name: str = "get_elements"
description: str = (
"Retrieve elements in the current web page matching the given CSS selector"
)
args_schema: Type[BaseModel] = GetElementsToolInput
def _run(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
results = _get_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
async def _arun(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
results = await _aget_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
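# Illustrative usage (a sketch; the browser helper is assumed from
# langchain_community.tools.playwright.utils, and the page must already be navigated):
#   from langchain_community.tools.playwright.utils import create_sync_playwright_browser
#   browser = create_sync_playwright_browser()
#   tool = GetElementsTool.from_browser(sync_browser=browser)
#   print(tool.run({"selector": "h1", "attributes": ["innerText"]}))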
|
from __future__ import annotations
import json
from typing import TYPE_CHECKING, List, Optional, Sequence, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
if TYPE_CHECKING:
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Page as SyncPage
class GetElementsToolInput(BaseModel):
"""Input for GetElementsTool."""
selector: str = Field(
...,
description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname",
)
attributes: List[str] = Field(
default_factory=lambda: ["innerText"],
description="Set of attributes to retrieve for each element",
)
async def _aget_elements(
page: AsyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = await page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = await element.inner_text()
else:
val = await element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
def _get_elements(
page: SyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
class GetElementsTool(BaseBrowserTool): # type: ignore[override, override]
"""Tool for getting elements in the current web page matching a CSS selector."""
name: str = "get_elements"
description: str = (
"Retrieve elements in the current web page matching the given CSS selector"
)
args_schema: Type[BaseModel] = GetElementsToolInput
def _run(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
results = _get_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
async def _arun(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
results = await _aget_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 1 output. It measures the
accuracy of the predicted class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
See CEBinaryClassificationEvaluator for an evaluator that determines automatically the optimal threshold.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
threshold: float = 0.5,
write_csv: bool = True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info(f"Accuracy: {acc * 100:.2f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
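# Illustrative usage (a sketch; the model name and sentence pairs are placeholders):
#   from sentence_transformers.cross_encoder import CrossEncoder
#   examples = [
#       InputExample(texts=["A man is eating food.", "A man eats something."], label=1),
#       InputExample(texts=["A man is eating food.", "The sky is blue."], label=0),
#   ]
#   evaluator = CEBinaryAccuracyEvaluator.from_input_examples(examples, name="dev")
#   accuracy = evaluator(CrossEncoder("cross-encoder/stsb-distilroberta-base"), output_path=".")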
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 1 output. It measures the
accuracy of the predicted class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
See CEBinaryClassificationEvaluator for an evaluator that determines automatically the optimal threshold.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
threshold: float = 0.5,
write_csv: bool = True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
from torchaudio._internal.module_utils import dropping_support
_CTC_DECODERS = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
_CUDA_CTC_DECODERS = [
"CUCTCDecoder",
"CUCTCHypothesis",
"cuda_ctc_decoder",
]
def __getattr__(name: str):
if name in _CTC_DECODERS:
try:
from . import _ctc_decoder
except Exception as err:
raise RuntimeError(
"CTC Decoder suit requires flashlight-text package and optionally KenLM. Please install them."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
elif name in _CUDA_CTC_DECODERS:
try:
from . import _cuda_ctc_decoder
except AttributeError as err:
raise RuntimeError(
"To use CUCTC decoder, please set BUILD_CUDA_CTC_DECODER=1 when building from source."
) from err
item = dropping_support(getattr(_cuda_ctc_decoder, name))
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__)
__all__ = _CTC_DECODERS + _CUDA_CTC_DECODERS
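# Illustrative note (added for clarity): accessing any name listed above, e.g.
# ``ctc_decoder``, goes through __getattr__, which lazily imports the backing
# implementation module, caches the resolved object in globals(), and raises a
# RuntimeError with an install hint when the optional dependency is missing.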
|
_CTC_DECODERS = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
_CUDA_CTC_DECODERS = [
"CUCTCDecoder",
"CUCTCHypothesis",
"cuda_ctc_decoder",
]
def __getattr__(name: str):
if name in _CTC_DECODERS:
try:
from . import _ctc_decoder
except Exception as err:
raise RuntimeError(
"CTC Decoder suit requires flashlight-text package and optionally KenLM. Please install them."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
elif name in _CUDA_CTC_DECODERS:
try:
from . import _cuda_ctc_decoder
except AttributeError as err:
raise RuntimeError(
"To use CUCTC decoder, please set BUILD_CUDA_CTC_DECODER=1 when building from source."
) from err
item = getattr(_cuda_ctc_decoder, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__)
__all__ = _CTC_DECODERS + _CUDA_CTC_DECODERS
|
"""Run smoke tests"""
import os
from pathlib import Path
from sys import platform
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_compile() -> None:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
        raise RuntimeError(f"ResNet50 classification failed: got {category_name}, expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from huggingface_hub import HfApi
import diffusers
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
ALWAYS_TEST_PIPELINE_MODULES = [
"controlnet",
"stable_diffusion",
"stable_diffusion_2",
"stable_diffusion_xl",
"stable_diffusion_adapter",
"ip_adapters",
"kandinsky2_2",
]
PIPELINE_USAGE_CUTOFF = int(os.getenv("PIPELINE_USAGE_CUTOFF", 50000))
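# The cutoff above can be overridden from the environment when this script is invoked,
# e.g. (hypothetical invocation; substitute the actual path of this script):
#   PIPELINE_USAGE_CUTOFF=100000 python <path-to-this-script>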
logger = logging.getLogger(__name__)
api = HfApi()
def filter_pipelines(usage_dict, usage_cutoff=10000):
output = []
for diffusers_object, usage in usage_dict.items():
if usage < usage_cutoff:
continue
is_diffusers_pipeline = hasattr(diffusers.pipelines, diffusers_object)
if not is_diffusers_pipeline:
continue
output.append(diffusers_object)
return output
def fetch_pipeline_objects():
models = api.list_models(library="diffusers")
downloads = defaultdict(int)
for model in models:
is_counted = False
for tag in model.tags:
if tag.startswith("diffusers:"):
is_counted = True
downloads[tag[len("diffusers:") :]] += model.downloads
if not is_counted:
downloads["other"] += model.downloads
# Remove 0 downloads
downloads = {k: v for k, v in downloads.items() if v > 0}
pipeline_objects = filter_pipelines(downloads, PIPELINE_USAGE_CUTOFF)
return pipeline_objects
def fetch_pipeline_modules_to_test():
try:
pipeline_objects = fetch_pipeline_objects()
except Exception as e:
logger.error(e)
raise RuntimeError("Unable to fetch model list from HuggingFace Hub.")
test_modules = []
for pipeline_name in pipeline_objects:
module = getattr(diffusers, pipeline_name)
test_module = module.__module__.split(".")[-2].strip()
test_modules.append(test_module)
return test_modules
def main():
test_modules = fetch_pipeline_modules_to_test()
test_modules.extend(ALWAYS_TEST_PIPELINE_MODULES)
# Get unique modules
test_modules = sorted(set(test_modules))
print(json.dumps(test_modules))
save_path = f"{PATH_TO_REPO}/reports"
os.makedirs(save_path, exist_ok=True)
with open(f"{save_path}/test-pipelines.json", "w") as f:
json.dump({"pipeline_test_modules": test_modules}, f)
if __name__ == "__main__":
main()
|
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from huggingface_hub import HfApi
import diffusers
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
ALWAYS_TEST_PIPELINE_MODULES = [
"controlnet",
"stable_diffusion",
"stable_diffusion_2",
"stable_diffusion_xl",
"stable_diffusion_adapter",
"deepfloyd_if",
"ip_adapters",
"kandinsky",
"kandinsky2_2",
"text_to_video_synthesis",
"wuerstchen",
]
PIPELINE_USAGE_CUTOFF = int(os.getenv("PIPELINE_USAGE_CUTOFF", 50000))
logger = logging.getLogger(__name__)
api = HfApi()
def filter_pipelines(usage_dict, usage_cutoff=10000):
output = []
for diffusers_object, usage in usage_dict.items():
if usage < usage_cutoff:
continue
is_diffusers_pipeline = hasattr(diffusers.pipelines, diffusers_object)
if not is_diffusers_pipeline:
continue
output.append(diffusers_object)
return output
def fetch_pipeline_objects():
models = api.list_models(library="diffusers")
downloads = defaultdict(int)
for model in models:
is_counted = False
for tag in model.tags:
if tag.startswith("diffusers:"):
is_counted = True
downloads[tag[len("diffusers:") :]] += model.downloads
if not is_counted:
downloads["other"] += model.downloads
# Remove 0 downloads
downloads = {k: v for k, v in downloads.items() if v > 0}
pipeline_objects = filter_pipelines(downloads, PIPELINE_USAGE_CUTOFF)
return pipeline_objects
def fetch_pipeline_modules_to_test():
try:
pipeline_objects = fetch_pipeline_objects()
except Exception as e:
logger.error(e)
raise RuntimeError("Unable to fetch model list from HuggingFace Hub.")
test_modules = []
for pipeline_name in pipeline_objects:
module = getattr(diffusers, pipeline_name)
test_module = module.__module__.split(".")[-2].strip()
test_modules.append(test_module)
return test_modules
def main():
test_modules = fetch_pipeline_modules_to_test()
test_modules.extend(ALWAYS_TEST_PIPELINE_MODULES)
# Get unique modules
test_modules = sorted(set(test_modules))
print(json.dumps(test_modules))
save_path = f"{PATH_TO_REPO}/reports"
os.makedirs(save_path, exist_ok=True)
with open(f"{save_path}/test-pipelines.json", "w") as f:
json.dump({"pipeline_test_modules": test_modules}, f)
if __name__ == "__main__":
main()
|
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_device():
array = np.array([1, 2, 3])
assert NumpyCompBackend.device(array) is None
@pytest.mark.parametrize('dtype', [np.int64, np.float64, np.int32, np.float32])
def test_dtype(dtype):
array = np.array([1, 2, 3], dtype=dtype)
assert NumpyCompBackend.dtype(array) == dtype
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
|
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import AspectRatioBatchSampler, ClassAwareSampler
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'get_loading_pipeline', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import AspectRatioBatchSampler, ClassAwareSampler
from .utils import get_loading_pipeline, replace_ImageToTensor
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
'CocoPanopticDataset', 'MultiImageMixDataset', 'OpenImagesDataset',
'OpenImagesChallengeDataset', 'AspectRatioBatchSampler',
'ClassAwareSampler'
]
|
"""Test EdenAi's text to speech Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from urllib.parse import urlparse
from langchain_community.tools.edenai import EdenAiTextToSpeechTool
def test_edenai_call() -> None:
"""Test simple call to edenai's text to speech endpoint."""
text2speech = EdenAiTextToSpeechTool(
providers=["amazon"], language="en", voice="MALE"
)
output = text2speech.invoke("hello")
parsed_url = urlparse(output)
assert text2speech.name == "edenai_text_to_speech"
assert text2speech.feature == "audio"
assert text2speech.subfeature == "text_to_speech"
assert isinstance(output, str)
assert parsed_url.scheme in ["http", "https"]
|
"""Test EdenAi's text to speech Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from urllib.parse import urlparse
from langchain_community.tools.edenai import EdenAiTextToSpeechTool
def test_edenai_call() -> None:
"""Test simple call to edenai's text to speech endpoint."""
text2speech = EdenAiTextToSpeechTool( # type: ignore[call-arg]
providers=["amazon"], language="en", voice="MALE"
)
output = text2speech.invoke("hello")
parsed_url = urlparse(output)
assert text2speech.name == "edenai_text_to_speech"
assert text2speech.feature == "audio"
assert text2speech.subfeature == "text_to_speech"
assert isinstance(output, str)
assert parsed_url.scheme in ["http", "https"]
|
from langchain_core.load.serializable import (
BaseSerialized,
Serializable,
SerializedConstructor,
SerializedNotImplemented,
SerializedSecret,
to_json_not_implemented,
try_neq_default,
)
__all__ = [
"BaseSerialized",
"Serializable",
"SerializedConstructor",
"SerializedNotImplemented",
"SerializedSecret",
"to_json_not_implemented",
"try_neq_default",
]
|
from langchain_core.load.serializable import (
BaseSerialized,
Serializable,
SerializedConstructor,
SerializedNotImplemented,
SerializedSecret,
to_json_not_implemented,
try_neq_default,
)
__all__ = [
"BaseSerialized",
"SerializedConstructor",
"SerializedSecret",
"SerializedNotImplemented",
"try_neq_default",
"Serializable",
"to_json_not_implemented",
]
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
        in the search path of the dynamic loading mechanism, so that importing
        `_torchaudio` will have the library loader find and load `libtorchaudio`.
        This is the reason why the function should not raise an error when the library
        file is not found.
    Returns:
        bool:
            True if the library file is found AND the library loaded without failure.
            False if the library file is not found (as in the case where torchaudio
            is deployed with pex format, and thus the shared library file is
            in a non-standard location).
            If the library file is found but there is an issue loading the library
            (such as a missing dependency), then this function raises the exception as-is.
    Raises:
        Exception:
            If the library file is found, but there is an issue loading the library file
            (when the underlying `ctypes.CDLL` throws an exception), this function will pass
            the exception as-is, instead of catching it and returning a bool.
            The expected case is an `OSError` thrown by `ctypes.CDLL` when a dynamic dependency
            is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
_FFMPEG_INITIALIZED = False
def _init_ffmpeg():
global _FFMPEG_INITIALIZED
if _FFMPEG_INITIALIZED:
return
if not torch.ops.torchaudio.is_ffmpeg_available():
raise RuntimeError(
"torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
)
try:
_load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
import torchaudio._torchaudio_ffmpeg # noqa
torch.ops.torchaudio.ffmpeg_init()
if torch.ops.torchaudio.ffmpeg_get_log_level() > 8:
torch.ops.torchaudio.ffmpeg_set_log_level(8)
_FFMPEG_INITIALIZED = True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
# Because this part is executed as part of `import torchaudio`, we ignore the
# initialization failure.
# If the FFmpeg integration is not properly initialized, then detailed error
# will be raised when client code attempts to import the dedicated feature.
try:
_init_ffmpeg()
except Exception:
pass
def _check_cuda_version():
version = torch.ops.torchaudio.cuda_version()
if version is not None and torch.version.cuda is not None:
version_str = str(version)
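        # e.g. a CUDA 11.7 build typically reports 11070, which the slicing below turns into "11.7"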
ta_version = f"{version_str[:-3]}.{version_str[-2]}"
t_version = torch.version.cuda.split(".")
t_version = f"{t_version[0]}.{t_version[1]}"
if ta_version != t_version:
raise RuntimeError(
"Detected that PyTorch and TorchAudio were compiled with different CUDA versions. "
f"PyTorch has CUDA version {t_version} whereas TorchAudio has CUDA version {ta_version}. "
"Please install the TorchAudio version that matches your PyTorch version."
)
_init_extension()
_check_cuda_version()
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
        in the search path of the dynamic loading mechanism, so that importing
        `_torchaudio` will have the library loader find and load `libtorchaudio`.
        This is the reason why the function should not raise an error when the library
        file is not found.
    Returns:
        bool:
            True if the library file is found AND the library loaded without failure.
            False if the library file is not found (as in the case where torchaudio
            is deployed with pex format, and thus the shared library file is
            in a non-standard location).
            If the library file is found but there is an issue loading the library
            (such as a missing dependency), then this function raises the exception as-is.
    Raises:
        Exception:
            If the library file is found, but there is an issue loading the library file
            (when the underlying `ctypes.CDLL` throws an exception), this function will pass
            the exception as-is, instead of catching it and returning a bool.
            The expected case is an `OSError` thrown by `ctypes.CDLL` when a dynamic dependency
            is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
_FFMPEG_INITIALIZED = False
def _init_ffmpeg():
global _FFMPEG_INITIALIZED
if _FFMPEG_INITIALIZED:
return
if not torch.ops.torchaudio.is_ffmpeg_available():
raise RuntimeError(
"torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
)
try:
_load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
import torchaudio._torchaudio_ffmpeg # noqa
torch.ops.torchaudio.ffmpeg_init()
if torch.ops.torchaudio.ffmpeg_get_log_level() > 8:
torch.ops.torchaudio.ffmpeg_set_log_level(8)
_FFMPEG_INITIALIZED = True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
# Because this part is executed as part of `import torchaudio`, we ignore the
# initialization failure.
# If the FFmpeg integration is not properly initialized, then detailed error
# will be raised when client code attempts to import the dedicated feature.
try:
_init_ffmpeg()
except Exception:
pass
def _check_cuda_version():
version = torch.ops.torchaudio.cuda_version()
if version is not None and torch.version.cuda is not None:
version_str = str(version)
ta_version = f"{version_str[:-3]}.{version_str[-2]}"
t_version = torch.version.cuda
if ta_version != t_version:
raise RuntimeError(
"Detected that PyTorch and TorchAudio were compiled with different CUDA versions. "
f"PyTorch has CUDA version {t_version} whereas TorchAudio has CUDA version {ta_version}. "
"Please install the TorchAudio version that matches your PyTorch version."
)
_init_extension()
_check_cuda_version()
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note that you can also pass BERT embeddings to the BiLSTM.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode="mean",
)
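# A max-pooled variant (the InferSent-style setup mentioned in the module docstring)
# would only swap the pooling mode; an illustrative sketch, not used below:
# pooling_model = models.Pooling(
#     lstm.get_word_embedding_dimension(),
#     pooling_mode="max",
# )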
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-bilstm-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 8. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-bilstm-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note that you can also pass BERT embeddings to the BiLSTM.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-bilstm-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 8. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-bilstm-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
input_size = 300
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances'))
]
train_dataloader = dict(
    batch_size=8,  # using 32 GPUs for training; total batch size is 32 x 8
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
        times=3,  # repeat 3 times; total training epochs are 12 x 3
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.001,
by_epoch=False,
begin=0,
end=20000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)
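# Hedged note on the scaling above: when automatic LR scaling is enabled, the optimizer lr
# is scaled linearly by actual_batch_size / base_batch_size; e.g. training with
# 8 GPUs x 8 samples = 64 would scale lr from 0.04 to 0.04 * 64 / 256 = 0.01.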
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, normed_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=8,  # using 32 GPUs for training.
    workers_per_gpu=0,  # workers_per_gpu > 0 may cause out-of-memory errors
train=dict(
_delete_=True,
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/oidv6-train-annotations-bbox.csv',
img_prefix=data_root + 'OpenImages/train/',
label_file=data_root +
'annotations/class-descriptions-boxable.csv',
hierarchy_file=data_root +
'annotations/bbox_labels_600_hierarchy.json',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=20000,
warmup_ratio=0.001,
step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)
|
"""Autoretriever prompts."""
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
from llama_index.core.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataInfo,
VectorStoreInfo,
VectorStoreQuerySpec,
)
# NOTE: these prompts are inspired by langchain's self-query prompt,
# and adapted to our use case.
# https://github.com/hwchase17/langchain/tree/main/langchain/chains/query_constructor/prompt.py
PREFIX = """\
Your goal is to structure the user's query to match the request schema provided below.
<< Structured Request Schema >>
When responding use a markdown code snippet with a JSON object formatted in the \
following schema:
{schema_str}
The query string should contain only text that is expected to match the contents of \
documents. Any conditions in the filter should not be mentioned in the query as well.
Make sure that filters only refer to attributes that exist in the data source.
Make sure that filters take into account the descriptions of attributes.
Make sure that filters are only used as needed. If there are no filters that should be \
applied return [] for the filter value.\
If the user's query explicitly mentions number of documents to retrieve, set top_k to \
that number, otherwise do not set top_k.
"""
example_info = VectorStoreInfo(
content_info="Lyrics of a song",
metadata_info=[
MetadataInfo(name="artist", type="str", description="Name of the song artist"),
MetadataInfo(
name="genre",
type="str",
description='The song genre, one of "pop", "rock" or "rap"',
),
],
)
example_query = "What are songs by Taylor Swift or Katy Perry in the dance pop genre"
example_output = VectorStoreQuerySpec(
query="teenager love",
filters=[
MetadataFilter(key="artist", value="Taylor Swift"),
MetadataFilter(key="artist", value="Katy Perry"),
MetadataFilter(key="genre", value="pop"),
],
)
example_info_2 = VectorStoreInfo(
content_info="Classic literature",
metadata_info=[
MetadataInfo(name="author", type="str", description="Author name"),
MetadataInfo(
name="book_title",
type="str",
description="Book title",
),
MetadataInfo(
name="year",
type="int",
description="Year Published",
),
MetadataInfo(
name="pages",
type="int",
description="Number of pages",
),
MetadataInfo(
name="summary",
type="str",
description="A short summary of the book",
),
],
)
example_query_2 = "What are some books by Jane Austen published after 1813 that explore the theme of marriage for social standing?"
example_output_2 = VectorStoreQuerySpec(
query="Books related to theme of marriage for social standing",
filters=[
MetadataFilter(key="year", value="1813", operator=FilterOperator.GT),
MetadataFilter(key="author", value="Jane Austen"),
],
)
EXAMPLES = f"""\
<< Example 1. >>
Data Source:
```json
{example_info.model_dump_json(indent=4)}
```
User Query:
{example_query}
Structured Request:
```json
{example_output.model_dump_json()}
```
<< Example 2. >>
Data Source:
```json
{example_info_2.model_dump_json(indent=4)}
```
User Query:
{example_query_2}
Structured Request:
```json
{example_output_2.model_dump_json()}
```
""".replace("{", "{{").replace("}", "}}")
SUFFIX = """
<< Example 3. >>
Data Source:
```json
{info_str}
```
User Query:
{query_str}
Structured Request:
"""
DEFAULT_VECTOR_STORE_QUERY_PROMPT_TMPL = PREFIX + EXAMPLES + SUFFIX
# deprecated, kept for backwards compatibility
"""Vector store query prompt."""
VectorStoreQueryPrompt = PromptTemplate
DEFAULT_VECTOR_STORE_QUERY_PROMPT = PromptTemplate(
template=DEFAULT_VECTOR_STORE_QUERY_PROMPT_TMPL,
prompt_type=PromptType.VECTOR_STORE_QUERY,
)
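# A minimal sketch of how the three template slots defined above are typically filled in
# (the query value is hypothetical; schema_str is the JSON schema of VectorStoreQuerySpec):
#   import json
#   prompt_str = DEFAULT_VECTOR_STORE_QUERY_PROMPT.format(
#       schema_str=json.dumps(VectorStoreQuerySpec.model_json_schema(), indent=4),
#       info_str=example_info.model_dump_json(indent=4),
#       query_str="songs about heartbreak released after 2015",
#   )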
|
"""Autoretriever prompts."""
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
from llama_index.core.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataInfo,
VectorStoreInfo,
VectorStoreQuerySpec,
)
# NOTE: these prompts are inspired by langchain's self-query prompt,
# and adapted to our use case.
# https://github.com/hwchase17/langchain/tree/main/langchain/chains/query_constructor/prompt.py
PREFIX = """\
Your goal is to structure the user's query to match the request schema provided below.
<< Structured Request Schema >>
When responding use a markdown code snippet with a JSON object formatted in the \
following schema:
{schema_str}
The query string should contain only text that is expected to match the contents of \
documents. Any conditions in the filter should not be mentioned in the query as well.
Make sure that filters only refer to attributes that exist in the data source.
Make sure that filters take into account the descriptions of attributes.
Make sure that filters are only used as needed. If there are no filters that should be \
applied return [] for the filter value.\
If the user's query explicitly mentions number of documents to retrieve, set top_k to \
that number, otherwise do not set top_k.
"""
example_info = VectorStoreInfo(
content_info="Lyrics of a song",
metadata_info=[
MetadataInfo(name="artist", type="str", description="Name of the song artist"),
MetadataInfo(
name="genre",
type="str",
description='The song genre, one of "pop", "rock" or "rap"',
),
],
)
example_query = "What are songs by Taylor Swift or Katy Perry in the dance pop genre"
example_output = VectorStoreQuerySpec(
query="teenager love",
filters=[
MetadataFilter(key="artist", value="Taylor Swift"),
MetadataFilter(key="artist", value="Katy Perry"),
MetadataFilter(key="genre", value="pop"),
],
)
example_info_2 = VectorStoreInfo(
content_info="Classic literature",
metadata_info=[
MetadataInfo(name="author", type="str", description="Author name"),
MetadataInfo(
name="book_title",
type="str",
description="Book title",
),
MetadataInfo(
name="year",
type="int",
description="Year Published",
),
MetadataInfo(
name="pages",
type="int",
description="Number of pages",
),
MetadataInfo(
name="summary",
type="str",
description="A short summary of the book",
),
],
)
example_query_2 = "What are some books by Jane Austen published after 1813 that explore the theme of marriage for social standing?"
example_output_2 = VectorStoreQuerySpec(
query="Books related to theme of marriage for social standing",
filters=[
MetadataFilter(key="year", value="1813", operator=FilterOperator.GT),
MetadataFilter(key="author", value="Jane Austen"),
],
)
EXAMPLES = f"""\
<< Example 1. >>
Data Source:
```json
{example_info.model_dump_json(indent=4)}
```
User Query:
{example_query}
Structured Request:
```json
{example_output.model_dump_json()}
```
<< Example 2. >>
Data Source:
```json
{example_info_2.model_dump_json(indent=4)}
```
User Query:
{example_query_2}
Structured Request:
```json
{example_output_2.model_dump_json()}
```
""".replace(
"{", "{{"
).replace(
"}", "}}"
)
SUFFIX = """
<< Example 3. >>
Data Source:
```json
{info_str}
```
User Query:
{query_str}
Structured Request:
"""
DEFAULT_VECTOR_STORE_QUERY_PROMPT_TMPL = PREFIX + EXAMPLES + SUFFIX
# deprecated, kept for backwards compatibility
"""Vector store query prompt."""
VectorStoreQueryPrompt = PromptTemplate
DEFAULT_VECTOR_STORE_QUERY_PROMPT = PromptTemplate(
template=DEFAULT_VECTOR_STORE_QUERY_PROMPT_TMPL,
prompt_type=PromptType.VECTOR_STORE_QUERY,
)
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
...,
description="List of sources used to answer the question",
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False, # noqa: FBT001,FBT002
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
            Defaults to `base`.
        prompt: Optional prompt to use for the chain.
        verbose: Whether to print the details of the chain.
    Returns:
        An LLMChain that answers questions with an output structured by the schema.
    """
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
msg = (
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
raise ValueError(msg)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema,
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
msg = (
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
raise ValueError(msg)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
),
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
return LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel,
verbose: bool = False, # noqa: FBT001,FBT002
**kwargs: Any,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
        verbose: Whether to print the details of the chain.
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm,
AnswerWithSources,
verbose=verbose,
**kwargs,
)
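# Illustrative usage (a sketch; ChatOpenAI is just one possible chat model and the strings
# are placeholders, not values mandated by this module):
#   from langchain_openai import ChatOpenAI
#   chain = create_qa_with_sources_chain(ChatOpenAI(model="gpt-4o-mini"))
#   result = chain.run(context="<retrieved documents>", question="<user question>")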
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False, # noqa: FBT001,FBT002
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
            Defaults to `base`.
        prompt: Optional prompt to use for the chain.
        verbose: Whether to print the details of the chain.
    Returns:
        An LLMChain that answers questions with an output structured by the schema.
    """
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
msg = (
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
raise ValueError(msg)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
msg = (
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
raise ValueError(msg)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
return LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel,
verbose: bool = False, # noqa: FBT001,FBT002
**kwargs: Any,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
        verbose: Whether to print the details of the chain.
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
|
_base_ = './faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './faster_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_FIREWORKS_API_BASE = "https://api.fireworks.ai/inference/v1"
DEFAULT_FIREWORKS_API_VERSION = ""
LLAMA_MODELS = {
"accounts/fireworks/models/llama-v2-7b-chat": 4096,
"accounts/fireworks/models/llama-v2-13b-chat": 4096,
"accounts/fireworks/models/llama-v2-70b-chat": 4096,
"accounts/fireworks/models/llama-v2-34b-code-instruct": 16384,
"accounts/fireworks/models/llamaguard-7b": 4096,
"accounts/fireworks/models/llama-v3-8b-instruct": 8192,
"accounts/fireworks/models/llama-v3-70b-instruct": 8192,
"accounts/fireworks/models/llama-v3p1-8b-instruct": 131072,
"accounts/fireworks/models/llama-v3p1-70b-instruct": 131072,
"accounts/fireworks/models/llama-v3p1-405b-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-1b-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-3b-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-11b-vision-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-90b-vision-instruct": 131072,
}
MISTRAL_MODELS = {
"accounts/fireworks/models/mistral-7b-instruct-4k": 16384,
"accounts/fireworks/models/mixtral-8x7b-instruct": 32768,
"accounts/fireworks/models/firefunction-v1": 32768,
"accounts/fireworks/models/mixtral-8x22b-instruct": 65536,
}
FUNCTION_CALLING_MODELS = {
"accounts/fireworks/models/firefunction-v2": 8192,
}
DEEPSEEK_MODELS = {
"accounts/fireworks/models/deepseek-v3": 131072,
"accounts/fireworks/models/deepseek-r1": 163840,
}
ALL_AVAILABLE_MODELS = {
**LLAMA_MODELS,
**MISTRAL_MODELS,
**FUNCTION_CALLING_MODELS,
**DEEPSEEK_MODELS,
}
DISCONTINUED_MODELS: Dict[str, int] = {}
def fireworks_modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = fireworks_modelname_to_contextsize(model_name)
"""
# handling finetuned models
# TO BE FILLED
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Fireworks hosted model {modelname} has been discontinued. "
"Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname, None)
if context_size is None:
raise ValueError(
            f"Unknown model: {modelname}. Please provide a valid Fireworks model name. "
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def is_function_calling_model(model: str) -> bool:
return "function" in model
def _message_to_fireworks_prompt(message: ChatMessage) -> Dict[str, Any]:
if message.role == MessageRole.USER:
prompt = {"role": "user", "content": message.content}
elif message.role == MessageRole.ASSISTANT:
prompt = {"role": "assistant", "content": message.content}
elif message.role == MessageRole.SYSTEM:
prompt = {"role": "system", "content": message.content}
elif message.role == MessageRole.FUNCTION:
raise ValueError(f"Message role {MessageRole.FUNCTION} is not supported.")
else:
raise ValueError(f"Unknown message role: {message.role}")
return prompt
def messages_to_fireworks_prompt(messages: Sequence[ChatMessage]) -> List[Dict]:
if len(messages) == 0:
raise ValueError("Got empty list of messages.")
return [_message_to_fireworks_prompt(message) for message in messages]
def resolve_fireworks_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
) -> Tuple[Optional[str], str, str]:
"""
"Resolve OpenAI credentials.
The order of precedence is:
1. param
2. env
3. openai module
4. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "FIREWORKS_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "FIREWORKS_API_BASE", "")
api_version = get_from_param_or_env(
"api_version", api_version, "FIREWORKS_API_VERSION", ""
)
    # fall back to defaults
final_api_key = api_key or ""
final_api_base = api_base or DEFAULT_FIREWORKS_API_BASE
final_api_version = api_version or DEFAULT_FIREWORKS_API_VERSION
return final_api_key, str(final_api_base), final_api_version
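# Illustrative resolution order (a sketch; the key below is a placeholder, not a real credential):
#   key, base, version = resolve_fireworks_credentials(api_key="fw-...")
#   # -> ("fw-...", "https://api.fireworks.ai/inference/v1", "")
# With no arguments and no FIREWORKS_* environment variables set, the module-level
# defaults are returned together with an empty api key.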
|
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_FIREWORKS_API_BASE = "https://api.fireworks.ai/inference/v1"
DEFAULT_FIREWORKS_API_VERSION = ""
LLAMA_MODELS = {
"accounts/fireworks/models/llama-v2-7b-chat": 4096,
"accounts/fireworks/models/llama-v2-13b-chat": 4096,
"accounts/fireworks/models/llama-v2-70b-chat": 4096,
"accounts/fireworks/models/llama-v2-34b-code-instruct": 16384,
"accounts/fireworks/models/llamaguard-7b": 4096,
"accounts/fireworks/models/llama-v3-8b-instruct": 8192,
"accounts/fireworks/models/llama-v3-70b-instruct": 8192,
"accounts/fireworks/models/llama-v3p1-8b-instruct": 131072,
"accounts/fireworks/models/llama-v3p1-70b-instruct": 131072,
"accounts/fireworks/models/llama-v3p1-405b-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-1b-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-3b-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-11b-vision-instruct": 131072,
"accounts/fireworks/models/llama-v3p2-90b-vision-instruct": 131072,
}
MISTRAL_MODELS = {
"accounts/fireworks/models/mistral-7b-instruct-4k": 16384,
"accounts/fireworks/models/mixtral-8x7b-instruct": 32768,
"accounts/fireworks/models/firefunction-v1": 32768,
"accounts/fireworks/models/mixtral-8x22b-instruct": 65536,
}
FUNCTION_CALLING_MODELS = {
"accounts/fireworks/models/firefunction-v2": 8192,
}
DEEPSEEK_MODELS = {
"accounts/fireworks/models/deepseek-v3": 131072,
}
ALL_AVAILABLE_MODELS = {
**LLAMA_MODELS,
**MISTRAL_MODELS,
**FUNCTION_CALLING_MODELS,
**DEEPSEEK_MODELS,
}
DISCONTINUED_MODELS: Dict[str, int] = {}
def fireworks_modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = fireworks_modelname_to_contextsize(model_name)
"""
# handling finetuned models
# TO BE FILLED
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Fireworks hosted model {modelname} has been discontinued. "
"Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid Fireworks model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def is_function_calling_model(model: str) -> bool:
return "function" in model
def _message_to_fireworks_prompt(message: ChatMessage) -> Dict[str, Any]:
if message.role == MessageRole.USER:
prompt = {"role": "user", "content": message.content}
elif message.role == MessageRole.ASSISTANT:
prompt = {"role": "assistant", "content": message.content}
elif message.role == MessageRole.SYSTEM:
prompt = {"role": "system", "content": message.content}
elif message.role == MessageRole.FUNCTION:
raise ValueError(f"Message role {MessageRole.FUNCTION} is not supported.")
else:
raise ValueError(f"Unknown message role: {message.role}")
return prompt
def messages_to_fireworks_prompt(messages: Sequence[ChatMessage]) -> List[Dict]:
if len(messages) == 0:
raise ValueError("Got empty list of messages.")
return [_message_to_fireworks_prompt(message) for message in messages]
def resolve_fireworks_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
) -> Tuple[Optional[str], str, str]:
"""
"Resolve OpenAI credentials.
The order of precedence is:
1. param
2. env
3. openai module
4. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "FIREWORKS_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "FIREWORKS_API_BASE", "")
api_version = get_from_param_or_env(
"api_version", api_version, "FIREWORKS_API_VERSION", ""
)
# fall back to defaults
final_api_key = api_key or ""
final_api_base = api_base or DEFAULT_FIREWORKS_API_BASE
final_api_version = api_version or DEFAULT_FIREWORKS_API_VERSION
return final_api_key, str(final_api_base), final_api_version
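# A minimal usage sketch, not part of the original module: it assumes the
# FIREWORKS_API_KEY environment variable is set, and the model name below is
# just one illustrative entry from LLAMA_MODELS.
if __name__ == "__main__":
    api_key, api_base, api_version = resolve_fireworks_credentials()
    chat = [
        ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
        ChatMessage(role=MessageRole.USER, content="Hello!"),
    ]
    # a list of {"role": ..., "content": ...} dicts ready to send to the API
    prompt = messages_to_fireworks_prompt(chat)
    context_window = fireworks_modelname_to_contextsize(
        "accounts/fireworks/models/llama-v3-8b-instruct"
    )
    print(api_base, api_version, context_window, prompt)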
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import DecodeImage, LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
scores: list[float],
batch_size: int = 16,
main_similarity: str | SimilarityFunction | None = None,
similarity_fn_names: list[Literal["cosine", "euclidean", "manhattan", "dot"]] | None = None,
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] | None = None,
truncate_dim: int | None = None,
):
super().__init__(
sentences1,
sentences2,
scores,
batch_size,
main_similarity,
similarity_fn_names,
name,
show_progress_bar,
write_csv,
precision,
truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
precision=self.precision,
normalize_embeddings=bool(self.precision),
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
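# A minimal usage sketch, not part of the original module; the SPLADE checkpoint
# name and the toy sentence pairs below are illustrative assumptions only.
if __name__ == "__main__":
    from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder

    model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
    evaluator = SparseEmbeddingSimilarityEvaluator(
        sentences1=["A cat sits on the mat.", "It is raining.", "He plays guitar."],
        sentences2=["A feline rests on a rug.", "The sun is shining.", "She reads a book."],
        scores=[0.9, 0.1, 0.0],
        name="toy_sts",
    )
    results = evaluator(model)
    print(results)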
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
precision=self.precision,
normalize_embeddings=bool(self.precision),
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
NEWS_DOCS = """API documentation:
Endpoint: https://newsapi.org
Top headlines /v2/top-headlines
This endpoint provides live top and breaking headlines for a country, specific category in a country, single source, or multiple sources. You can also search with keywords. Articles are sorted by the earliest date published first.
This endpoint is great for retrieving headlines for use with news tickers or similar.
Request parameters
country | The 2-letter ISO 3166-1 code of the country you want to get headlines for. Possible options: ae ar at au be bg br ca ch cn co cu cz de eg fr gb gr hk hu id ie il in it jp kr lt lv ma mx my ng nl no nz ph pl pt ro rs ru sa se sg si sk th tr tw ua us ve za. Note: you can't mix this param with the sources param.
category | The category you want to get headlines for. Possible options: business entertainment general health science sports technology. Note: you can't mix this param with the sources param.
sources | A comma-separated string of identifiers for the news sources or blogs you want headlines from. Use the /top-headlines/sources endpoint to locate these programmatically or look at the sources index. Note: you can't mix this param with the country or category params.
q | Keywords or a phrase to search for.
pageSize | int | The number of results to return per page (request). 20 is the default, 100 is the maximum.
page | int | Use this to page through the results if the total results found is greater than the page size.
Response object
status | string | If the request was successful or not. Options: ok, error. In the case of error a code and message property will be populated.
totalResults | int | The total number of results available for your request.
articles | array[article] | The results of the request.
source | object | The identifier id and a display name name for the source this article came from.
author | string | The author of the article
title | string | The headline or title of the article.
description | string | A description or snippet from the article.
url | string | The direct URL to the article.
urlToImage | string | The URL to a relevant image for the article.
publishedAt | string | The date and time that the article was published, in UTC (+000)
content | string | The unformatted content of the article, where available. This is truncated to 200 chars.
Use page size: 2
""" # noqa: E501
|
# flake8: noqa
NEWS_DOCS = """API documentation:
Endpoint: https://newsapi.org
Top headlines /v2/top-headlines
This endpoint provides live top and breaking headlines for a country, specific category in a country, single source, or multiple sources. You can also search with keywords. Articles are sorted by the earliest date published first.
This endpoint is great for retrieving headlines for use with news tickers or similar.
Request parameters
country | The 2-letter ISO 3166-1 code of the country you want to get headlines for. Possible options: ae ar at au be bg br ca ch cn co cu cz de eg fr gb gr hk hu id ie il in it jp kr lt lv ma mx my ng nl no nz ph pl pt ro rs ru sa se sg si sk th tr tw ua us ve za. Note: you can't mix this param with the sources param.
category | The category you want to get headlines for. Possible options: business entertainment general health science sports technology. Note: you can't mix this param with the sources param.
sources | A comma-separated string of identifiers for the news sources or blogs you want headlines from. Use the /top-headlines/sources endpoint to locate these programmatically or look at the sources index. Note: you can't mix this param with the country or category params.
q | Keywords or a phrase to search for.
pageSize | int | The number of results to return per page (request). 20 is the default, 100 is the maximum.
page | int | Use this to page through the results if the total results found is greater than the page size.
Response object
status | string | If the request was successful or not. Options: ok, error. In the case of error a code and message property will be populated.
totalResults | int | The total number of results available for your request.
articles | array[article] | The results of the request.
source | object | The identifier id and a display name name for the source this article came from.
author | string | The author of the article
title | string | The headline or title of the article.
description | string | A description or snippet from the article.
url | string | The direct URL to the article.
urlToImage | string | The URL to a relevant image for the article.
publishedAt | string | The date and time that the article was published, in UTC (+000)
content | string | The unformatted content of the article, where available. This is truncated to 200 chars.
Use page size: 2
"""
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0']['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0']['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
print(f' metrics {metrics.keys()}')
gateway_metrics = metrics['gateway/rep-0']['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0']['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
# In[1]:
import pandas as pd
# In[2]:
# from https://github.com/pytorch/audio/blob/main/.github/process_commit.py
primary_labels_mapping = {
"BC-breaking": "Backward-incompatible changes",
"deprecation": "Deprecations",
"bug fix": "Bug Fixes",
"new feature": "New Features",
"improvement": "Improvements",
"prototype": "Prototypes",
"other": "Other",
"None": "Missing",
}
secondary_labels_mapping = {
"module: io": "I/O",
"module: ops": "Ops",
"module: models": "Models",
"module: pipelines": "Pipelines",
"module: datasets": "Datasets",
"module: docs": "Documentation",
"module: tests": "Tests",
"tutorial": "Tutorials",
"recipe": "Recipes",
"example": "Examples",
"build": "Build",
"style": "Style",
"perf": "Performance",
"other": "Other",
"None": "Missing",
}
# In[3]:
df = pd.read_json("data.json").T
df.tail()
# In[4]:
def get_labels(col_name, labels):
df[col_name] = [[] for _ in range(len(df))]
for _, row in df.iterrows():
row[col_name] = "None"
for label in labels:
if label in row["labels"]:
row[col_name] = label
break
# In[5]:
get_labels("primary_label", primary_labels_mapping.keys())
get_labels("secondary_label", secondary_labels_mapping.keys())
df.tail(5)
# In[6]:
for primary_label in primary_labels_mapping.keys():
primary_df = df[df["primary_label"] == primary_label]
if primary_df.empty:
continue
print(f"## {primary_labels_mapping[primary_label]}")
for secondary_label in secondary_labels_mapping.keys():
secondary_df = primary_df[primary_df["secondary_label"] == secondary_label]
if secondary_df.empty:
continue
print(f"### {secondary_labels_mapping[secondary_label]}")
for _, row in secondary_df.iterrows():
print(f"- {row['title']}")
print()
print()
|
# In[1]:
import pandas as pd
# In[2]:
# from https://github.com/pytorch/audio/blob/main/.github/process_commit.py
primary_labels_mapping = {
"BC-breaking": "Backward-incompatible changes",
"deprecation": "Deprecations",
"bug fix": "Bug Fixes",
"new feature": "New Features",
"improvement": "Improvements",
"prototype": "Prototypes",
"other": "Other",
"None": "Missing",
}
secondary_labels_mapping = {
"module: io": "I/O",
"module: ops": "Ops",
"module: models": "Models",
"module: pipelines": "Pipelines",
"module: datasets": "Datasets",
"module: docs": "Documentation",
"module: tests": "Tests",
"tutorial": "Tutorials",
"recipe": "Recipes",
"example": "Examples",
"build": "Build",
"style": "Style",
"perf": "Performance",
"other": "Other",
"None": "Missing",
}
# In[3]:
df = pd.read_json("data.json").T
df.tail()
# In[4]:
def get_labels(col_name, labels):
df[col_name] = [[] for _ in range(len(df))]
for _, row in df.iterrows():
row[col_name] = "None"
for label in labels:
if label in row["labels"]:
row[col_name] = label
break
# In[5]:
get_labels("primary_label", primary_labels_mapping.keys())
get_labels("secondary_label", secondary_labels_mapping.keys())
df.tail(5)
# In[6]:
for primary_label in primary_labels_mapping.keys():
primary_df = df[df["primary_label"] == primary_label]
if primary_df.empty:
continue
print(f"## {primary_labels_mapping[primary_label]}")
for secondary_label in secondary_labels_mapping.keys():
secondary_df = primary_df[primary_df["secondary_label"] == secondary_label]
if secondary_df.empty:
continue
print(f"### {secondary_labels_mapping[secondary_label]}")
for _, row in secondary_df.iterrows():
print(f"- {row['title']}")
print()
print()
|
from langchain_core.prompts import __all__
EXPECTED_ALL = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"DictPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"format_document",
"aformat_document",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
from langchain_core.prompts import __all__
EXPECTED_ALL = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"format_document",
"aformat_document",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
"""Pydantic v1 compatibility shim."""
from importlib import metadata
from langchain_core._api.deprecation import warn_deprecated
# Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#
# This hack is done for the following reasons:
# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
# both dependencies and dependents may be stuck on either version of v1 or v2.
# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
# unambiguously uses either v1 or v2 API.
# * This change is easier to roll out and roll back.
try:
from pydantic.v1 import * # noqa: F403
except ImportError:
from pydantic import * # type: ignore # noqa: F403
try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from importlib import metadata
from langchain_core._api.deprecation import warn_deprecated
# Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#
# This hack is done for the following reasons:
# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
# both dependencies and dependents may be stuck on either version of v1 or v2.
# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
# unambiguously uses either v1 or v2 API.
# * This change is easier to roll out and roll back.
try:
from pydantic.v1 import * # noqa: F403
except ImportError:
from pydantic import * # type: ignore # noqa: F403
try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
_base_ = [
'./faster_rcnn_r50_dc5.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='SELSA',
pretrains=None,
detector=dict(
backbone=dict(depth=18, base_channels=2),
roi_head=dict(
type='SelsaRoIHead',
bbox_head=dict(
type='SelsaBBoxHead',
num_shared_fcs=2,
aggregator=dict(
type='SelsaAggregator',
in_channels=32,
num_attention_blocks=16)))))
# dataset settings
data = dict(
val=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')),
test=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[2, 5])
# runtime settings
total_epochs = 7
evaluation = dict(metric=['bbox'], interval=7)
|
_base_ = [
'./faster_rcnn_r50_dc5.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='SELSA',
pretrains=None,
detector=dict(
pretrained='torchvision://resnet101',
backbone=dict(depth=101),
roi_head=dict(
type='SelsaRoIHead',
bbox_head=dict(
type='SelsaBBoxHead',
num_shared_fcs=2,
aggregator=dict(
type='SelsaAggregator',
in_channels=1024,
num_attention_blocks=16)))))
# dataset settings
data = dict(
val=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')),
test=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[2, 5])
# runtime settings
total_epochs = 7
evaluation = dict(metric=['bbox'], interval=7)
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage evaluator
EVALUATOR = Registry('evaluator')
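# A minimal sketch, not part of the original file, of how a root registry is
# typically used: register a module with the decorator, then build it from a
# config dict. `ToyModel` is an illustrative name only.
if __name__ == '__main__':
    import torch.nn as nn

    @MODELS.register_module()
    class ToyModel(nn.Module):
        def forward(self, x):
            return x

    # the default build function resolves the `type` key against the registered class
    toy_model = MODELS.build(dict(type='ToyModel'))
    print(toy_model)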
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
|
import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
@lru_cache()
def _get_fields(dc):
return [f.name for f in fields(dc)]
class BaseDCType:
_data_class = None
def __init__(
self: 'T',
_obj: Optional['T'] = None,
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
**kwargs,
):
self._data = None
if isinstance(_obj, type(self)):
if copy:
self.copy_from(_obj)
else:
self._data = _obj._data
elif isinstance(_obj, dict):
kwargs.update(_obj)
elif is_multimodal(_obj):
self._data = type(self)._from_dataclass(_obj)._data
if kwargs:
try:
self._data = self._data_class(self, **kwargs)
except TypeError as ex:
if unknown_fields_handler == 'raise':
raise AttributeError(f'unknown attributes') from ex
else:
if field_resolver:
kwargs = {
field_resolver.get(k, k): v for k, v in kwargs.items()
}
_fields = _get_fields(self._data_class)
_unknown_kwargs = None
_unresolved = set(kwargs.keys()).difference(_fields)
if _unresolved:
_unknown_kwargs = {k: kwargs[k] for k in _unresolved}
for k in _unresolved:
kwargs.pop(k)
self._data = self._data_class(self, **kwargs)
if _unknown_kwargs and unknown_fields_handler == 'catch':
getattr(self, self._unresolved_fields_dest).update(
_unknown_kwargs
)
for k in self._post_init_fields:
if k in kwargs:
setattr(self, k, kwargs[k])
if not _obj and not kwargs and self._data is None:
self._data = self._data_class(self)
if self._data is None:
raise ValueError(
f'Failed to initialize {typename(self)} from obj={_obj}, kwargs={kwargs}'
)
def copy_from(self: 'T', other: 'T') -> None:
"""Overwrite self by copying from another :class:`Document`.
:param other: the other Document to copy from
"""
self._data = cp.deepcopy(other._data)
def clear(self) -> None:
"""Clear all fields from this :class:`Document` to their default values."""
for f in self.non_empty_fields:
setattr(self._data, f, None)
def pop(self, *fields) -> None:
"""Clear some fields from this :class:`Document` to their default values.
:param fields: field names to clear.
"""
for f in fields:
if hasattr(self, f):
setattr(self._data, f, None)
@property
def non_empty_fields(self) -> Tuple[str]:
"""Get all non-emtpy fields of this :class:`Document`.
Non-empty fields are the fields with not-`None` and not-default values.
:return: field names in a tuple.
"""
return self._data._non_empty_fields
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __hash__(self):
return hash(self._data)
def __repr__(self):
content = str(self.non_empty_fields)
content += f' at {getattr(self, "id", id(self))}'
return f'<{self.__class__.__name__} {content.strip()}>'
def __bytes__(self):
return self.to_bytes()
def __eq__(self, other):
if type(self) is type(other):
return self._data == other._data
return False
|
import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import T
@lru_cache()
def _get_fields(dc):
return [f.name for f in fields(dc)]
class BaseDCType:
_data_class = None
def __init__(
self: 'T',
_obj: Optional['T'] = None,
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
**kwargs,
):
self._data = None
if isinstance(_obj, type(self)):
if copy:
self.copy_from(_obj)
else:
self._data = _obj._data
elif isinstance(_obj, dict):
kwargs.update(_obj)
elif is_multimodal(_obj):
self._data = type(self)._from_dataclass(_obj)._data
if kwargs:
try:
self._data = self._data_class(self, **kwargs)
except TypeError as ex:
if unknown_fields_handler == 'raise':
raise AttributeError(f'unknown attributes') from ex
else:
if field_resolver:
kwargs = {
field_resolver.get(k, k): v for k, v in kwargs.items()
}
_fields = _get_fields(self._data_class)
_unknown_kwargs = None
_unresolved = set(kwargs.keys()).difference(_fields)
if _unresolved:
_unknown_kwargs = {k: kwargs[k] for k in _unresolved}
for k in _unresolved:
kwargs.pop(k)
self._data = self._data_class(self, **kwargs)
if _unknown_kwargs and unknown_fields_handler == 'catch':
getattr(self, self._unresolved_fields_dest).update(
_unknown_kwargs
)
for k in self._post_init_fields:
if k in kwargs:
setattr(self, k, kwargs[k])
if not _obj and not kwargs and self._data is None:
self._data = self._data_class(self)
if self._data is None:
raise ValueError(
f'Failed to initialize {typename(self)} from obj={_obj}, kwargs={kwargs}'
)
def copy_from(self: 'T', other: 'T') -> None:
"""Overwrite self by copying from another :class:`Document`.
:param other: the other Document to copy from
"""
self._data = cp.deepcopy(other._data)
def clear(self) -> None:
"""Clear all fields from this :class:`Document` to their default values."""
for f in self.non_empty_fields:
setattr(self._data, f, None)
def pop(self, *fields) -> None:
"""Clear some fields from this :class:`Document` to their default values.
:param fields: field names to clear.
"""
for f in fields:
if hasattr(self, f):
setattr(self._data, f, None)
@property
def non_empty_fields(self) -> Tuple[str]:
"""Get all non-emtpy fields of this :class:`Document`.
Non-empty fields are the fields with not-`None` and not-default values.
:return: field names in a tuple.
"""
return self._data._non_empty_fields
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __hash__(self):
return hash(self._data)
def __repr__(self):
content = str(self.non_empty_fields)
content += f' at {getattr(self, "id", id(self))}'
return f'<{self.__class__.__name__} {content.strip()}>'
def __bytes__(self):
return self.to_bytes()
def __eq__(self, other):
if type(self) is type(other):
return self._data == other._data
return False
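# A minimal usage sketch, not part of the original module, assuming the concrete
# `Document` subclass of BaseDCType: with unknown_fields_handler='catch', keyword
# arguments that are not dataclass fields are collected into the attribute named
# by `_unresolved_fields_dest` (the `tags` dict for `Document`).
if __name__ == '__main__':
    from docarray import Document

    d = Document(text='hello', my_custom_field=1)  # `my_custom_field` is not a Document field
    print(d.tags)  # expected to contain {'my_custom_field': 1}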
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.22.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
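# Illustrative check, not part of the original file, of how the parser above
# handles release-candidate suffixes:
#   parse_version_info('2.22.0')    -> (2, 22, 0)
#   parse_version_info('2.22.0rc1') -> (2, 22, 0, 'rc1')
assert parse_version_info('2.22.0') == (2, 22, 0)
assert parse_version_info('2.22.0rc1') == (2, 22, 0, 'rc1')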
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.21.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
When distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.train_loop.dataloader, 'sampler') and hasattr(
runner.train_loop.dataloader.sampler, 'set_epoch'):
# In case the` _SingleProcessDataLoaderIter` has no sampler,
# or data loader uses `SequentialSampler` in Pytorch.
runner.train_loop.dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.train_loop.dataloader,
'batch_sampler') and hasattr(
runner.train_loop.dataloader.batch_sampler.sampler,
'set_epoch'):
# In case the` _SingleProcessDataLoaderIter` has no batch sampler.
# batch sampler in pytorch warps the sampler as its attributes.
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch(
runner.epoch)
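# A standalone sketch, not part of the original file, of the `set_epoch` contract
# this hook relies on: PyTorch's DistributedSampler only reshuffles between epochs
# when `set_epoch` is called, which is what `before_train_epoch` automates. The toy
# dataset and the explicit num_replicas/rank below are illustrative assumptions.
if __name__ == '__main__':
    import torch
    from torch.utils.data import DataLoader, DistributedSampler, TensorDataset

    dataset = TensorDataset(torch.arange(8))
    # explicit num_replicas/rank so no distributed process group is required here
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True)
    loader = DataLoader(dataset, sampler=sampler, batch_size=2)
    for epoch in range(3):
        sampler.set_epoch(epoch)  # what DistSamplerSeedHook does once per epoch
        for batch in loader:
            pass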
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
When distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.cur_dataloader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
runner.cur_dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.cur_dataloader.batch_sampler.sampler, 'set_epoch'):
# batch sampler in pytorch warps the sampler as its attributes.
runner.cur_dataloader.batch_sampler.sampler.set_epoch(runner.epoch)
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/main/configs'
files = sorted(glob.glob('../../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../../configs', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/3.x/configs'
files = sorted(glob.glob('../../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../../configs', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
import inspect
import pytest
from datasets.splits import Split, SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict",
[
SplitDict(),
SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
SplitDict({"train": SplitInfo()}),
],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
split_dict_yaml_list = split_dict._to_yaml_list()
assert len(split_dict_yaml_list) == len(split_dict)
reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
split_info.dataset_name = None
# the split name of split_dict takes over the name of the split info object
split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
split_dict_asdict = asdict(SplitDict({"train": split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
def test_named_split_inequality():
# Used while building the docs, when set as a default parameter value in a function signature
assert Split.TRAIN != inspect.Parameter.empty
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict",
[
SplitDict(),
SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
SplitDict({"train": SplitInfo()}),
],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
split_dict_yaml_list = split_dict._to_yaml_list()
assert len(split_dict_yaml_list) == len(split_dict)
reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
split_info.dataset_name = None
# the split name of split_dict takes over the name of the split info object
split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
split_dict_asdict = asdict(SplitDict({"train": split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
help='The output image metas file name. The save dir is in the '
'same directory as `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
help='Number of processes used to collect image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
file_client = mmcv.FileClient(backend='disk')
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
assert args.out.endswith('pkl'), 'The output file name must have a pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
suffix = ann_file.split('.')[-1]
raise NotImplementedError('Annotation file must have a csv or txt '
f'suffix, but got {suffix}')
print(f'Successfully load annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
mmcv.dump(image_metas, save_path, protocol=4)
print(f'Image meta file save to: {save_path}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get test image metas on a specific dataset.
Here is an example to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
help='The output image metas file name. The save dir is in the '
'same directory as `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
help='Number of processes used to collect image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
file_client = mmcv.FileClient(backend='disk')
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
meta = dict(filename=filename, ori_shape=img.shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
assert args.out.endswith('pkl'), 'The output file name must have a pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
ann_file = cfg.data.test.ann_file
img_prefix = cfg.data.test.img_prefix
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
suffix = ann_file.split('.')[-1]
raise NotImplementedError('Annotation file must have a csv or txt '
f'suffix, but got {suffix}')
print(f'Successfully load annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = cfg.data.test.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
mmcv.dump(image_metas, save_path)
print(f'Image meta file save to: {save_path}')
if __name__ == '__main__':
main()
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# attempt to fix the fork error on macOS; it may have no effect, in which case the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the OS X
    El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS; this seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the OS X
    El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self, batch_inputs: Tensor,
batch_data_samples: SampleList, **kwargs) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.forward_train(x, label_assignment_results,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore)
return losses
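# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original file): a minimal config fragment
# referencing the registered 'LAD' detector. Every nested dict is a placeholder; the bbox heads
# must provide `get_label_assignment` (teacher) and `forward_train` (student) as used above.
lad_model_cfg = dict(
    type='LAD',
    backbone=dict(type='ResNet', depth=50),  # placeholder student backbone
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    bbox_head=dict(type='LADHead', num_classes=80),  # assumed head name
    teacher_ckpt='path/to/teacher.pth',  # hypothetical checkpoint path
    teacher_backbone=dict(type='ResNet', depth=101),
    teacher_neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    teacher_bbox_head=dict(type='LADHead', num_classes=80),  # assumed head name
)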
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self, batch_inputs: Tensor,
batch_data_samples: SampleList, **kwargs) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
batch_gt_instances = []
batch_gt_instances_ignore = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
batch_gt_instances.append(data_sample.gt_instances)
if 'ignored_instances' in data_sample:
batch_gt_instances_ignore.append(data_sample.ignored_instances)
else:
batch_gt_instances_ignore.append(None)
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.forward_train(x, label_assignment_results,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore)
return losses
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, token=self.token),
client_kwargs={"trust_env": True}, # Enable reading proxy env variables.
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return sorted(f["name"] for f in out)
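# ----------------------------------------------------------------------------------
# Illustrative usage sketch (assumption, not part of the original file): list and open the
# files of a public dataset repository. Requires network access; the repository id below is
# only an example, any dataset repository works.
if __name__ == "__main__":
    from huggingface_hub import HfApi
    repo_info = HfApi().dataset_info("squad")  # hypothetical example repository
    fs = HfFileSystem(repo_info=repo_info)
    entries = fs.ls("", detail=True)  # top-level files and directories of the repository
    files = [e["name"] for e in entries if e["type"] == "file"]
    with fs.open(files[0]) as f:  # resolved through hf_hub_url and opened over HTTP by fsspec
        print(files[0], f.read(80))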
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
client_kwargs={"trust_env": True}, # Enable reading proxy env variables.
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return sorted(f["name"] for f in out)
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor', 'deployment'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
v[
'default'
            ] = 'random in [49152, 65535]'  # avoid random port numbers causing devbot to commit forever
type = None if v['type'] == 'null' else v['type']
table.append(f'| `{k}` | {desc} | `{type}` | `{v["default"]}` |')
with open(f'../docs/concepts/flow/{s}-args.md', 'w') as fp:
fp.write('\n'.join(table))
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
v[
'default'
            ] = 'random in [49152, 65535]'  # avoid random port numbers causing devbot to commit forever
type = None if v['type'] == 'null' else v['type']
table.append(f'| `{k}` | {desc} | `{type}` | `{v["default"]}` |')
with open(f'../docs/concepts/flow/{s}-args.md', 'w') as fp:
fp.write('\n'.join(table))
|
from collections.abc import Mapping
from operator import itemgetter
from typing import Any, Callable, Optional, Union
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.runnables import RouterRunnable, Runnable
from langchain_core.runnables.base import RunnableBindingBase
from typing_extensions import TypedDict
class OpenAIFunction(TypedDict):
"""A function description for ChatOpenAI"""
name: str
"""The name of the function."""
description: str
"""The description of the function."""
parameters: dict
"""The parameters to the function."""
class OpenAIFunctionsRouter(RunnableBindingBase[BaseMessage, Any]):
"""A runnable that routes to the selected function."""
functions: Optional[list[OpenAIFunction]]
def __init__(
self,
runnables: Mapping[
str,
Union[
Runnable[dict, Any],
Callable[[dict], Any],
],
],
functions: Optional[list[OpenAIFunction]] = None,
):
if functions is not None:
if len(functions) != len(runnables):
raise ValueError(
"The number of functions does not match the number of runnables."
)
if not all(func["name"] in runnables for func in functions):
raise ValueError(
"One or more function names are not found in runnables."
)
router = (
JsonOutputFunctionsParser(args_only=False)
| {"key": itemgetter("name"), "input": itemgetter("arguments")}
| RouterRunnable(runnables)
)
super().__init__(bound=router, kwargs={}, functions=functions)
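# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original file): route a hand-crafted OpenAI
# function call to a plain Python callable. The AIMessage below mimics what ChatOpenAI returns.
if __name__ == "__main__":
    from langchain_core.messages import AIMessage
    def add(args: dict) -> int:
        return args["a"] + args["b"]
    router = OpenAIFunctionsRouter(
        runnables={"add": add},
        functions=[
            OpenAIFunction(
                name="add",
                description="Add two integers.",
                parameters={
                    "type": "object",
                    "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
                    "required": ["a", "b"],
                },
            )
        ],
    )
    message = AIMessage(
        content="",
        additional_kwargs={"function_call": {"name": "add", "arguments": '{"a": 2, "b": 3}'}},
    )
    print(router.invoke(message))  # expected: 5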
|
from collections.abc import Mapping
from operator import itemgetter
from typing import Any, Callable, Optional, Union
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.runnables import RouterRunnable, Runnable
from langchain_core.runnables.base import RunnableBindingBase
from typing_extensions import TypedDict
class OpenAIFunction(TypedDict):
"""A function description for ChatOpenAI"""
name: str
"""The name of the function."""
description: str
"""The description of the function."""
parameters: dict
"""The parameters to the function."""
class OpenAIFunctionsRouter(RunnableBindingBase[BaseMessage, Any]):
"""A runnable that routes to the selected function."""
functions: Optional[list[OpenAIFunction]]
def __init__(
self,
runnables: Mapping[
str,
Union[
Runnable[dict, Any],
Callable[[dict], Any],
],
],
functions: Optional[list[OpenAIFunction]] = None,
):
if functions is not None:
assert len(functions) == len(runnables)
assert all(func["name"] in runnables for func in functions)
router = (
JsonOutputFunctionsParser(args_only=False)
| {"key": itemgetter("name"), "input": itemgetter("arguments")}
| RouterRunnable(runnables)
)
super().__init__(bound=router, kwargs={}, functions=functions)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the OS X
    El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the OS X
    El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
AnyTensor = Union[NdArray]
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
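# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original file): AnyTensor validates
# whichever tensor types are installed; a plain numpy array is always accepted and resolves
# to NdArray, the first member of the Union.
if __name__ == '__main__':
    import numpy as np
    from pydantic.tools import parse_obj_as
    tensor = parse_obj_as(AnyTensor, np.zeros((3, 4)))
    assert isinstance(tensor, NdArray)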
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
AnyTensor = Union[NdArray]
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
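# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original config): how the MultiStepLR
# milestones above scale a hypothetical base LR of 0.01 -- after the 500-iter constant warmup,
# the LR is multiplied by gamma=0.1 at epochs 16 and 22.
if __name__ == '__main__':
    base_lr = 0.01  # assumed value; the real base LR comes from the optimizer config
    for epoch in (1, 15, 16, 21, 22, 24):
        factor = 0.1 ** sum(epoch >= m for m in (16, 22))
        print(f'epoch {epoch:2d}: lr = {base_lr * factor:g}')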
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field, BaseModel
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str = Field(description="The text of the document", default="")
class EmbeddingResponseModel(TextDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class Parameters(BaseModel):
emb_dim: int
class SampleExecutor(Executor):
@requests(on="/encode")
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
embeddings=np.random.random((1, 64)),
)
)
return DocList[EmbeddingResponseModel](ret)
@requests(on="/encode_parameter")
def bar(self, docs: DocList[TextDoc], parameters: Parameters, **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
embeddings=np.random.random((1, parameters.emb_dim)),
)
)
return DocList[EmbeddingResponseModel](ret)
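# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original file): serve the executor locally
# and call the parametrized endpoint. The port and embedding dimension are arbitrary examples.
if __name__ == "__main__":
    from jina import Client, Deployment
    with Deployment(uses=SampleExecutor, port=12345):
        docs = Client(port=12345).post(
            on="/encode_parameter",
            inputs=DocList[TextDoc]([TextDoc(text="hello")]),
            parameters={"emb_dim": 16},
            return_type=DocList[EmbeddingResponseModel],
        )
        print(docs[0].embeddings.shape)  # expected: (1, 16)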
|
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str = Field(description="The text of the document", default="")
class EmbeddingResponseModel(TextDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class SampleExecutor(Executor):
@requests(on="/encode")
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
embeddings=np.random.random((1, 64)),
)
)
return DocList[EmbeddingResponseModel](ret)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Union, Dict
import numpy as np
from annoy import AnnoyIndex
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class AnnoySearcher(Executor):
"""Annoy powered vector indexer
For more information about the Annoy supported parameters, please consult:
- https://github.com/spotify/annoy
.. note::
        The Annoy package dependency is only required at query time.
"""
def __init__(
self,
default_top_k: int = 10,
metric: str = 'euclidean',
num_trees: int = 10,
dump_path: Optional[str] = None,
default_traversal_paths: List[str] = ['r'],
is_distance: bool = False,
**kwargs,
):
"""
Initialize an AnnoyIndexer
        :param default_top_k: get top k vectors
        :param metric: Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot"
        :param num_trees: builds a forest of n_trees trees. More trees give higher precision when querying.
        :param dump_path: the path to load ids and vecs
        :param default_traversal_paths: traversal paths on docs, e.g. ['r'], ['c']
        :param is_distance: Boolean flag that describes if the distance metric needs to be reinterpreted as similarities.
:param args:
:param kwargs:
"""
super().__init__(**kwargs)
self.default_top_k = default_top_k
self.metric = metric
self.num_trees = num_trees
self.default_traversal_paths = default_traversal_paths
self.is_distance = is_distance
self.logger = get_logger(self)
self._doc_id_to_offset = {}
dump_path = dump_path or kwargs.get('runtime_args', {}).get('dump_path', None)
if dump_path is not None:
self.logger.info('Start building "AnnoyIndexer" from dump data')
ids, vecs = import_vectors(dump_path, str(self.runtime_args.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = AnnoyIndex(num_dim, self.metric)
self._load_index(self._ids, self._vecs)
self.logger.info('Done building Annoy index')
else:
self.logger.warning(
'No data loaded in "AnnoyIndexer". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
for idx, v in enumerate(vecs):
self._indexer.add_item(idx, v.astype(np.float32))
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.build(self.num_trees)
@requests(on='/search')
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
if not hasattr(self, '_indexer'):
self.logger.warning('Querying against an empty index')
return
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
top_k = parameters.get('top_k', self.default_top_k)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.get_nns_by_vector(
doc.embedding, top_k, include_distances=True
)
for idx, dist in zip(indices, dists):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
if self.is_distance:
if self.metric == 'dot':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = dist
else:
if self.metric == 'dot':
match.scores[self.metric] = dist
elif self.metric == 'angular' or self.metric == 'hamming':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on='/fill_embedding')
def fill_embedding(self, query_da: DocumentArray, **kwargs):
for doc in query_da:
doc_idx = self._doc_id_to_offset.get(doc.id)
if doc_idx is not None:
doc.embedding = np.array(self._indexer.get_item_vector(int(doc_idx)))
else:
self.logger.warning(f'Document {doc.id} not found in index')
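# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original file): query the searcher inside a
# Flow. Without a `dump_path` pointing to a real dump the executor only warns about an empty
# index, so a production setup would also pass `dump_path` through `uses_with`.
if __name__ == '__main__':
    from jina import Flow
    f = Flow().add(uses=AnnoySearcher, uses_with={'metric': 'angular', 'default_top_k': 5})
    with f:
        f.post(
            on='/search',
            inputs=DocumentArray([Document(embedding=np.random.random(64))]),
            parameters={'top_k': 3},
        )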
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Union, Dict
import numpy as np
from annoy import AnnoyIndex
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class AnnoySearcher(Executor):
"""Annoy powered vector indexer
For more information about the Annoy supported parameters, please consult:
- https://github.com/spotify/annoy
.. note::
        The Annoy package dependency is only required at query time.
"""
def __init__(
self,
default_top_k: int = 10,
metric: str = 'euclidean',
num_trees: int = 10,
dump_path: Optional[str] = None,
default_traversal_paths: List[str] = ['r'],
is_distance: bool = False,
**kwargs,
):
"""
Initialize an AnnoyIndexer
        :param default_top_k: get top k vectors
        :param metric: Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot"
        :param num_trees: builds a forest of n_trees trees. More trees give higher precision when querying.
        :param dump_path: the path to load ids and vecs
        :param default_traversal_paths: traversal paths on docs, e.g. ['r'], ['c']
        :param is_distance: Boolean flag that describes if the distance metric needs to be reinterpreted as similarities.
:param args:
:param kwargs:
"""
super().__init__(**kwargs)
self.default_top_k = default_top_k
self.metric = metric
self.num_trees = num_trees
self.default_traversal_paths = default_traversal_paths
self.is_distance = is_distance
self.logger = get_logger(self)
dump_path = dump_path or kwargs.get('runtime_args', {}).get('dump_path', None)
if dump_path is not None:
self.logger.info('Start building "AnnoyIndexer" from dump data')
ids, vecs = import_vectors(dump_path, str(self.runtime_args.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = AnnoyIndex(num_dim, self.metric)
self._doc_id_to_offset = {}
self._load_index(self._ids, self._vecs)
self.logger.info('Done building Annoy index')
else:
self.logger.warning(
'No data loaded in "AnnoyIndexer". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
for idx, v in enumerate(vecs):
self._indexer.add_item(idx, v.astype(np.float32))
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.build(self.num_trees)
@requests(on='/search')
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
if not hasattr(self, '_indexer'):
self.logger.warning('Querying against an empty index')
return
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
top_k = parameters.get('top_k', self.default_top_k)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.get_nns_by_vector(
doc.embedding, top_k, include_distances=True
)
for idx, dist in zip(indices, dists):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
if self.is_distance:
if self.metric == 'dot':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = dist
else:
if self.metric == 'dot':
match.scores[self.metric] = dist
elif self.metric == 'angular' or self.metric == 'hamming':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on='/fill_embedding')
def fill_embedding(self, query_da: DocumentArray, **kwargs):
for doc in query_da:
doc.embedding = np.array(
self._indexer.get_item_vector(int(self._doc_id_to_offset[str(doc.id)]))
)
|
"""Test in memory docstore."""
from typing import Any
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert DEF_EXPECTED_RESULT == result_dict
def test_combining_output_parser_output_type() -> None:
"""Test combining output parser output type is Dict[str, Any]."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
assert combining_parser.OutputType == dict[str, Any]
|
"""Test in memory docstore."""
from typing import Any, Dict
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert DEF_EXPECTED_RESULT == result_dict
def test_combining_output_parser_output_type() -> None:
"""Test combining output parser output type is Dict[str, Any]."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
assert combining_parser.OutputType is Dict[str, Any]
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.remote import _mixin_http_server_parser
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
'This Deployment will not be context managed by the Flow.',
)
gp.add_argument(
'--grpc-metadata',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The metadata to be passed to the gRPC request.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
_mixin_http_server_parser(gp)
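# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original file): attach the Deployment
# arguments to a standalone parser and inspect a couple of parsed values.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    mixin_base_deployment_parser(parser)
    args = parser.parse_args(['--external', '--tls'])
    print(args.external, args.tls)  # True True
    print(args.uses_before, args.uses_after)  # None None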
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
'This Deployment will not be context managed by the Flow.',
)
gp.add_argument(
'--grpc-metadata',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The metadata to be passed to the gRPC request.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Image Demo.
This script adopts a new inference class; it currently supports image path,
np.array and folder input formats, and will support video and webcam
input in the future.
Example:
Save visualizations and predictions results::
python demo/image_demo.py demo/demo.jpg rtmdet-s
python demo/image_demo.py demo/demo.jpg \
configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \
--weights rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 --texts bench
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 --texts 'bench . car .'
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365
--texts 'bench . car .' -c
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 \
--texts 'There are a lot of cars here.'
Visualize prediction results::
python demo/image_demo.py demo/demo.jpg rtmdet-ins-s --show
python demo/image_demo.py demo/demo.jpg rtmdet-ins_s_8xb32-300e_coco \
--show
"""
from argparse import ArgumentParser
from mmengine.logging import print_log
from mmdet.apis import DetInferencer
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'inputs', type=str, help='Input image file or folder path.')
parser.add_argument(
'model',
type=str,
        help='Config file, checkpoint .pth file, or the model name '
        'and alias defined in metafile. If the parameter is '
        'a .pth weights file, the model configuration will be '
        'read from it.')
parser.add_argument('--weights', default=None, help='Checkpoint file')
parser.add_argument(
'--out-dir',
type=str,
default='outputs',
help='Output directory of images or prediction results.')
parser.add_argument('--texts', help='text prompt')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--pred-score-thr',
type=float,
default=0.3,
help='bbox score threshold')
parser.add_argument(
'--batch-size', type=int, default=1, help='Inference batch size.')
parser.add_argument(
'--show',
action='store_true',
help='Display the image in a popup window.')
parser.add_argument(
'--no-save-vis',
action='store_true',
help='Do not save detection vis results')
parser.add_argument(
'--no-save-pred',
action='store_true',
help='Do not save detection json results')
parser.add_argument(
'--print-result',
action='store_true',
help='Whether to print the results.')
parser.add_argument(
'--palette',
default='none',
choices=['coco', 'voc', 'citys', 'random', 'none'],
help='Color palette used for visualization')
# only for GLIP
parser.add_argument(
'--custom-entities',
'-c',
action='store_true',
        help='Whether to customize entity names. '
        'If so, the input text should be in the '
        '"cls_name1 . cls_name2 . cls_name3 ." format')
call_args = vars(parser.parse_args())
if call_args['no_save_vis'] and call_args['no_save_pred']:
call_args['out_dir'] = ''
if call_args['model'].endswith('.pth'):
        print_log('The model is a weights file, automatically '
                  'assigning it to --weights')
call_args['weights'] = call_args['model']
call_args['model'] = None
init_kws = ['model', 'weights', 'device', 'palette']
init_args = {}
for init_kw in init_kws:
init_args[init_kw] = call_args.pop(init_kw)
return init_args, call_args
def main():
init_args, call_args = parse_args()
    # TODO: Video and webcam inputs are currently not supported, and inference
    # may consume too much memory if your input folder contains a lot of images.
    # This will be optimized later.
inferencer = DetInferencer(**init_args)
inferencer(**call_args)
if call_args['out_dir'] != '' and not (call_args['no_save_vis']
and call_args['no_save_pred']):
print_log(f'results have been saved at {call_args["out_dir"]}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Image Demo.
This script adopts a new inference class; it currently supports image path,
np.array and folder input formats, and will support video and webcam
input in the future.
Example:
Save visualizations and predictions results::
python demo/image_demo.py demo/demo.jpg rtmdet-s
python demo/image_demo.py demo/demo.jpg \
configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \
--weights rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth
Visualize prediction results::
python demo/image_demo.py demo/demo.jpg rtmdet-ins-s --show
python demo/image_demo.py demo/demo.jpg rtmdet-ins_s_8xb32-300e_coco \
--show
"""
from argparse import ArgumentParser
from mmengine.logging import print_log
from mmdet.apis import DetInferencer
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'inputs', type=str, help='Input image file or folder path.')
parser.add_argument(
'model',
type=str,
        help='Config file, checkpoint .pth file, or the model name '
        'and alias defined in metafile. If the parameter is '
        'a .pth weights file, the model configuration will be '
        'read from it.')
parser.add_argument('--weights', default=None, help='Checkpoint file')
parser.add_argument(
'--out-dir',
type=str,
default='outputs',
help='Output directory of images or prediction results.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--pred-score-thr',
type=float,
default=0.3,
help='bbox score threshold')
parser.add_argument(
'--batch-size', type=int, default=1, help='Inference batch size.')
parser.add_argument(
'--show',
action='store_true',
help='Display the image in a popup window.')
parser.add_argument(
'--no-save-vis',
action='store_true',
help='Do not save detection vis results')
parser.add_argument(
'--no-save-pred',
action='store_true',
help='Do not save detection json results')
parser.add_argument(
'--print-result',
action='store_true',
help='Whether to print the results.')
parser.add_argument(
'--palette',
default='none',
choices=['coco', 'voc', 'citys', 'random', 'none'],
help='Color palette used for visualization')
call_args = vars(parser.parse_args())
if call_args['no_save_vis'] and call_args['no_save_pred']:
call_args['out_dir'] = ''
if call_args['model'].endswith('.pth'):
        print_log('The model is a weights file, automatically '
                  'assigning it to --weights')
call_args['weights'] = call_args['model']
call_args['model'] = None
init_kws = ['model', 'weights', 'device', 'palette']
init_args = {}
for init_kw in init_kws:
init_args[init_kw] = call_args.pop(init_kw)
return init_args, call_args
def main():
init_args, call_args = parse_args()
# TODO: Video and webcam input are currently not supported, and inference
# may consume too much memory if your input folder contains many images.
# This will be optimized later.
inferencer = DetInferencer(**init_args)
inferencer(**call_args)
if call_args['out_dir'] != '' and not (call_args['no_save_vis']
and call_args['no_save_pred']):
print_log(f'results have been saved at {call_args["out_dir"]}')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Utilities to handle file locking in `datasets`."""
import os
from filelock import FileLock as FileLock_
from filelock import UnixFileLock
class FileLock(FileLock_):
"""
A `filelock.FileLock` initializer that handles long paths.
"""
MAX_FILENAME_LENGTH = 255
def __init__(self, lock_file, *args, **kwargs):
lock_file = self.hash_filename_if_too_long(lock_file)
super().__init__(lock_file, *args, **kwargs)
@classmethod
def hash_filename_if_too_long(cls, path: str) -> str:
filename = os.path.basename(path)
max_filename_length = cls.MAX_FILENAME_LENGTH
if issubclass(cls, UnixFileLock):
max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
if len(filename) > max_filename_length:
dirname = os.path.dirname(path)
hashed_filename = str(hash(filename))
new_filename = (
filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
)
return os.path.join(dirname, new_filename)
else:
return path
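# Illustrative usage sketch (not part of the original module; the temp path below is an
# assumption): a lock path whose basename exceeds MAX_FILENAME_LENGTH is hashed and
# truncated so the resulting ".lock" file stays within filesystem filename limits.
if __name__ == "__main__":
    import tempfile

    long_path = os.path.join(tempfile.gettempdir(), "x" * 300 + ".lock")
    print(len(os.path.basename(long_path)))  # 305 characters, too long for most filesystems
    print(os.path.basename(FileLock.hash_filename_if_too_long(long_path)))  # "xxx...<hash>.lock"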
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Utilities to handle file locking in `datasets`."""
import os
from filelock import FileLock as FileLock_
class FileLock(FileLock_):
"""
A `filelock.FileLock` initializer that handles long paths.
"""
MAX_FILENAME_LENGTH = 255
def __init__(self, lock_file, *args, **kwargs):
lock_file = self.hash_filename_if_too_long(lock_file)
super().__init__(lock_file, *args, **kwargs)
@classmethod
def hash_filename_if_too_long(cls, path: str) -> str:
filename = os.path.basename(path)
if len(filename) > cls.MAX_FILENAME_LENGTH:
dirname = os.path.dirname(path)
hashed_filename = str(hash(filename))
new_filename = (
filename[: cls.MAX_FILENAME_LENGTH - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
)
return os.path.join(dirname, new_filename)
else:
return path
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import Optional
import torch
TORCH_VERSION = torch.__version__
def is_rocm_pytorch() -> bool:
"""Check whether the PyTorch is compiled on ROCm."""
is_rocm = False
if TORCH_VERSION != 'parrots':
try:
from torch.utils.cpp_extension import ROCM_HOME
is_rocm = True if ((torch.version.hip is not None) and
(ROCM_HOME is not None)) else False
except ImportError:
pass
return is_rocm
def _get_cuda_home() -> Optional[str]:
"""Obtain the path of CUDA home."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def get_build_config():
"""Obtain the build information of PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.config import get_build_info
return get_build_info()
else:
return torch.__config__.show()
def _get_conv() -> tuple:
"""A wrapper to obtain base classes of Conv layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
else:
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
return _ConvNd, _ConvTransposeMixin
def _get_dataloader() -> tuple:
"""A wrapper to obtain DataLoader class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from torch.utils.data import DataLoader, PoolDataLoader
else:
from torch.utils.data import DataLoader
PoolDataLoader = DataLoader
return DataLoader, PoolDataLoader
def _get_extension():
"""A wrapper to obtain extension class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import BuildExtension, Extension
CppExtension = partial(Extension, cuda=False)
CUDAExtension = partial(Extension, cuda=True)
else:
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
return BuildExtension, CppExtension, CUDAExtension
def _get_pool() -> tuple:
"""A wrapper to obtain base classes of pooling layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
else:
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm() -> tuple:
"""A wrapper to obtain base classes of normalization layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
else:
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm
return _BatchNorm, _InstanceNorm, SyncBatchNorm_
_ConvNd, _ConvTransposeMixin = _get_conv()
DataLoader, PoolDataLoader = _get_dataloader()
BuildExtension, CppExtension, CUDAExtension = _get_extension()
_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm()
_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool()
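# Illustrative sanity check (not part of the original module): on a plain PyTorch install the
# aliases resolved above point at the regular torch base classes, so downstream code can use
# them without caring which backend (PyTorch or Parrots) is active.
if __name__ == "__main__":
    print(issubclass(torch.nn.Conv2d, _ConvNd))              # True
    print(isinstance(torch.nn.BatchNorm2d(4), _BatchNorm))   # True
    print(DataLoader is PoolDataLoader)                       # True when not running Parrots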
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import Optional
import torch
TORCH_VERSION = torch.__version__
def is_rocm_pytorch() -> bool:
is_rocm = False
if TORCH_VERSION != 'parrots':
try:
from torch.utils.cpp_extension import ROCM_HOME
is_rocm = True if ((torch.version.hip is not None) and
(ROCM_HOME is not None)) else False
except ImportError:
pass
return is_rocm
def _get_cuda_home() -> Optional[str]:
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def get_build_config():
if TORCH_VERSION == 'parrots':
from parrots.config import get_build_info
return get_build_info()
else:
return torch.__config__.show()
def _get_conv() -> tuple:
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
else:
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
return _ConvNd, _ConvTransposeMixin
def _get_dataloader() -> tuple:
if TORCH_VERSION == 'parrots':
from torch.utils.data import DataLoader, PoolDataLoader
else:
from torch.utils.data import DataLoader
PoolDataLoader = DataLoader
return DataLoader, PoolDataLoader
def _get_extension():
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import BuildExtension, Extension
CppExtension = partial(Extension, cuda=False)
CUDAExtension = partial(Extension, cuda=True)
else:
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
return BuildExtension, CppExtension, CUDAExtension
def _get_pool() -> tuple:
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
else:
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm() -> tuple:
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
else:
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm
return _BatchNorm, _InstanceNorm, SyncBatchNorm_
_ConvNd, _ConvTransposeMixin = _get_conv()
DataLoader, PoolDataLoader = _get_dataloader()
BuildExtension, CppExtension, CUDAExtension = _get_extension()
_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm()
_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool()
|
from __future__ import annotations
import os
import platform
import tempfile
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> "DatasetDict":
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
This is only required for Ubuntu, as we otherwise have disk space issues there.
"""
if os.environ.get("CI", None) and platform.system() == "Linux":
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
import os
import platform
import tempfile
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> "DatasetDict":
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
This is only required for Ubuntu, as we otherwise have disk space issues there.
"""
if os.environ.get("CI", None) and platform.system() == "Linux":
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
import pytest
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from sklearn.utils._tags import get_tags
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
@pytest.mark.parametrize(
"estimator, value",
[
[NoTagsEstimator(), False],
[ClassifierEstimator(), True],
[TransformerMixin(), False],
[RegressorMixin(), True],
[BaseEstimator(), False],
],
)
def test_requires_y(estimator, value):
assert get_tags(estimator).target_tags.required == value
|
import pytest
from sklearn.base import BaseEstimator
from sklearn.utils._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
class NoTagsEstimator:
pass
class MoreTagsEstimator:
def _more_tags(self):
return {"allow_nan": True}
@pytest.mark.parametrize(
"estimator, err_msg",
[
(BaseEstimator(), "The key xxx is not defined in _get_tags"),
(NoTagsEstimator(), "The key xxx is not defined in _DEFAULT_TAGS"),
],
)
def test_safe_tags_error(estimator, err_msg):
# Check that safe_tags raises error in ambiguous case.
with pytest.raises(ValueError, match=err_msg):
_safe_tags(estimator, key="xxx")
@pytest.mark.parametrize(
"estimator, key, expected_results",
[
(NoTagsEstimator(), None, _DEFAULT_TAGS),
(NoTagsEstimator(), "allow_nan", _DEFAULT_TAGS["allow_nan"]),
(MoreTagsEstimator(), None, {**_DEFAULT_TAGS, **{"allow_nan": True}}),
(MoreTagsEstimator(), "allow_nan", True),
(BaseEstimator(), None, _DEFAULT_TAGS),
(BaseEstimator(), "allow_nan", _DEFAULT_TAGS["allow_nan"]),
(BaseEstimator(), "allow_nan", _DEFAULT_TAGS["allow_nan"]),
],
)
def test_safe_tags_no_get_tags(estimator, key, expected_results):
# check the behaviour of _safe_tags when an estimator does not implement
# _get_tags
assert _safe_tags(estimator, key=key) == expected_results
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
nightly,
require_accelerator,
require_torch,
torch_device,
)
enable_full_determinism()
class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
@property
def dummy_image(self):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=6,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
@property
def dummy_vq_model(self):
torch.manual_seed(0)
model = VQModel(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=3,
)
return model
def test_inference_superresolution(self):
device = "cpu"
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(device)
generator = torch.Generator(device=device).manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@require_accelerator
def test_inference_superresolution_fp16(self):
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
# put models in fp16
unet = unet.half()
vqvae = vqvae.half()
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(torch_device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(torch_device)
image = ldm(init_image, num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch
class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase):
def test_inference_superresolution(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool.png"
)
init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"])
ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution")
ldm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
nightly,
require_accelerator,
require_torch,
torch_device,
)
enable_full_determinism()
class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
@property
def dummy_image(self):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=6,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
@property
def dummy_vq_model(self):
torch.manual_seed(0)
model = VQModel(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=3,
)
return model
def test_inference_superresolution(self):
device = "cpu"
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(device)
generator = torch.Generator(device=device).manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@require_accelerator
def test_inference_superresolution_fp16(self):
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
# put models in fp16
unet = unet.half()
vqvae = vqvae.half()
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(torch_device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(torch_device)
image = ldm(init_image, num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch
class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase):
def test_inference_superresolution(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool.png"
)
init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"])
ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution")
ldm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
|
from abc import ABC
from docarray.array.storage.annlite.backend import BackendMixin, AnnliteConfig
from docarray.array.storage.annlite.find import FindMixin
from docarray.array.storage.annlite.getsetdel import GetSetDelMixin
from docarray.array.storage.annlite.seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'AnnliteConfig']
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
|
from abc import ABC
from .backend import BackendMixin, AnnliteConfig
from .find import FindMixin
from .getsetdel import GetSetDelMixin
from .seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'AnnliteConfig']
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
|
import enum
from collections.abc import Sequence
from typing import TypeVar
T = TypeVar("T", bound=enum.Enum)
class StrEnumMeta(enum.EnumMeta):
auto = enum.auto
def from_str(self: type[T], member: str) -> T: # type: ignore[misc]
try:
return self[member]
except KeyError:
# TODO: use `add_suggestion` from torchvision.prototype.utils._internal to improve the error message as
# soon as it is migrated.
raise ValueError(f"Unknown value '{member}' for {self.__name__}.") from None
class StrEnum(enum.Enum, metaclass=StrEnumMeta):
pass
def sequence_to_str(seq: Sequence, separate_last: str = "") -> str:
if not seq:
return ""
if len(seq) == 1:
return f"'{seq[0]}'"
head = "'" + "', '".join([str(item) for item in seq[:-1]]) + "'"
tail = f"{'' if separate_last and len(seq) == 2 else ','} {separate_last}'{seq[-1]}'"
return head + tail
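# Illustrative sketch (not part of the original module): how the separate_last argument
# changes the rendering of the final element.
if __name__ == "__main__":
    print(sequence_to_str(["cat"]))                                      # 'cat'
    print(sequence_to_str(["cat", "dog"], separate_last="or "))          # 'cat' or 'dog'
    print(sequence_to_str(["cat", "dog", "bird"], separate_last="or "))  # 'cat', 'dog', or 'bird'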
|
import enum
from typing import Sequence, Type, TypeVar
T = TypeVar("T", bound=enum.Enum)
class StrEnumMeta(enum.EnumMeta):
auto = enum.auto
def from_str(self: Type[T], member: str) -> T: # type: ignore[misc]
try:
return self[member]
except KeyError:
# TODO: use `add_suggestion` from torchvision.prototype.utils._internal to improve the error message as
# soon as it is migrated.
raise ValueError(f"Unknown value '{member}' for {self.__name__}.") from None
class StrEnum(enum.Enum, metaclass=StrEnumMeta):
pass
def sequence_to_str(seq: Sequence, separate_last: str = "") -> str:
if not seq:
return ""
if len(seq) == 1:
return f"'{seq[0]}'"
head = "'" + "', '".join([str(item) for item in seq[:-1]]) + "'"
tail = f"{'' if separate_last and len(seq) == 2 else ','} {separate_last}'{seq[-1]}'"
return head + tail
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
import warnings
from inspect import signature
__all__ = ["deprecated"]
class deprecated:
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty pair of parentheses:
Examples
--------
>>> from sklearn.utils import deprecated
>>> deprecated()
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : str, default=''
To be added to the deprecation messages.
"""
# Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=""):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
elif isinstance(obj, property):
# Note that this is only triggered properly if the `deprecated`
# decorator is placed before the `property` decorator, like so:
#
# @deprecated(msg)
# @property
# def deprecated_attribute_(self):
# ...
return self._decorate_property(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
new = cls.__new__
sig = signature(cls)
def wrapped(cls, *args, **kwargs):
warnings.warn(msg, category=FutureWarning)
if new is object.__new__:
return object.__new__(cls)
return new(cls, *args, **kwargs)
cls.__new__ = wrapped
wrapped.__name__ = "__new__"
wrapped.deprecated_original = new
# Restore the original signature, see PEP 362.
cls.__signature__ = sig
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
@functools.wraps(fun)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return fun(*args, **kwargs)
# Add a reference to the wrapped function so that we can introspect
# on function arguments in Python 2 (already works in Python 3)
wrapped.__wrapped__ = fun
return wrapped
def _decorate_property(self, prop):
msg = self.extra
@property
@functools.wraps(prop.fget)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return prop.fget(*args, **kwargs)
return wrapped
def _is_deprecated(func):
"""Helper to check if func is wrapped by our deprecated decorator"""
closures = getattr(func, "__closure__", [])
if closures is None:
closures = []
is_deprecated = "deprecated" in "".join(
[c.cell_contents for c in closures if isinstance(c.cell_contents, str)]
)
return is_deprecated
# TODO(1.8): remove force_all_finite and change the default value of ensure_all_finite
# to True (remove None without deprecation).
def _deprecate_force_all_finite(force_all_finite, ensure_all_finite):
"""Helper to deprecate force_all_finite in favor of ensure_all_finite."""
if force_all_finite != "deprecated":
warnings.warn(
"'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be "
"removed in 1.8.",
FutureWarning,
)
if ensure_all_finite is not None:
raise ValueError(
"'force_all_finite' and 'ensure_all_finite' cannot be used together. "
"Pass `ensure_all_finite` only."
)
return force_all_finite
if ensure_all_finite is None:
return True
return ensure_all_finite
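# Illustrative sketch (not part of the original module; old_function is a made-up example):
# decorating a function makes every call emit a FutureWarning, and _is_deprecated() detects
# the wrapper by inspecting the message captured in its closure.
if __name__ == "__main__":
    @deprecated("use new_function instead")
    def old_function():
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert old_function() == 42
    assert any(issubclass(w.category, FutureWarning) for w in caught)
    assert _is_deprecated(old_function)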
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
import warnings
from inspect import signature
__all__ = ["deprecated"]
class deprecated:
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty pair of parentheses:
Examples
--------
>>> from sklearn.utils import deprecated
>>> deprecated()
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : str, default=''
To be added to the deprecation messages.
"""
# Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=""):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
elif isinstance(obj, property):
# Note that this is only triggered properly if the `deprecated`
# decorator is placed before the `property` decorator, like so:
#
# @deprecated(msg)
# @property
# def deprecated_attribute_(self):
# ...
return self._decorate_property(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
new = cls.__new__
sig = signature(cls)
def wrapped(cls, *args, **kwargs):
warnings.warn(msg, category=FutureWarning)
if new is object.__new__:
return object.__new__(cls)
return new(cls, *args, **kwargs)
cls.__new__ = wrapped
wrapped.__name__ = "__new__"
wrapped.deprecated_original = new
# Restore the original signature, see PEP 362.
cls.__signature__ = sig
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
@functools.wraps(fun)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return fun(*args, **kwargs)
# Add a reference to the wrapped function so that we can introspect
# on function arguments in Python 2 (already works in Python 3)
wrapped.__wrapped__ = fun
return wrapped
def _decorate_property(self, prop):
msg = self.extra
@property
@functools.wraps(prop.fget)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return prop.fget(*args, **kwargs)
return wrapped
def _is_deprecated(func):
"""Helper to check if func is wrapped by our deprecated decorator"""
closures = getattr(func, "__closure__", [])
if closures is None:
closures = []
is_deprecated = "deprecated" in "".join(
[c.cell_contents for c in closures if isinstance(c.cell_contents, str)]
)
return is_deprecated
# TODO: remove in 1.7
def _deprecate_Xt_in_inverse_transform(X, Xt):
"""Helper to deprecate the `Xt` argument in favor of `X` in inverse_transform."""
if X is not None and Xt is not None:
raise TypeError("Cannot use both X and Xt. Use X only.")
if X is None and Xt is None:
raise TypeError("Missing required positional argument: X.")
if Xt is not None:
warnings.warn(
"Xt was renamed X in version 1.5 and will be removed in 1.7.",
FutureWarning,
)
return Xt
return X
# TODO(1.8): remove force_all_finite and change the default value of ensure_all_finite
# to True (remove None without deprecation).
def _deprecate_force_all_finite(force_all_finite, ensure_all_finite):
"""Helper to deprecate force_all_finite in favor of ensure_all_finite."""
if force_all_finite != "deprecated":
warnings.warn(
"'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be "
"removed in 1.8.",
FutureWarning,
)
if ensure_all_finite is not None:
raise ValueError(
"'force_all_finite' and 'ensure_all_finite' cannot be used together. "
"Pass `ensure_all_finite` only."
)
return force_all_finite
if ensure_all_finite is None:
return True
return ensure_all_finite
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
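# The single-line format above renders, for example:
#   DeprecationWarning: foo is deprecated (raised from app.py:12)
# with the category and message in bold yellow and the location in a dim (bold-black) ANSI style.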
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.9'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn with
many plot generators in parallel, compared with the Ubuntu default of ulimit -n 1024 or the
OS X El Capitan default of 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.8'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn with
many plot generators in parallel, compared with the Ubuntu default of ulimit -n 1024 or the
OS X El Capitan default of 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
(set(block.keys()) <= {"type", "image_url", "detail"})
and (image_url := block.get("image_url"))
and isinstance(image_url, dict)
):
url = image_url.get("url")
if isinstance(url, str):
return True
elif block.get("type") == "file":
if (file := block.get("file")) and isinstance(file, dict):
file_data = file.get("file_data")
if isinstance(file_data, str):
return True
elif block.get("type") == "input_audio":
if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
audio_data = input_audio.get("data")
audio_format = input_audio.get("format")
if isinstance(audio_data, str) and isinstance(audio_format, str):
return True
else:
return False
return False
def _parse_data_uri(uri: str) -> Optional[dict]:
"""Parse a data URI into its components. If parsing fails, return None.
Example:
.. code-block:: python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)
assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
if match is None:
return None
return {
"source_type": "base64",
"data": match.group("data"),
"mime_type": match.group("mime_type"),
}
def _convert_openai_format_to_data_block(block: dict) -> dict:
"""Convert OpenAI image content block to standard data content block.
If parsing fails, pass-through.
Args:
block: The OpenAI image content block to convert.
Returns:
The converted standard data content block.
"""
if block["type"] == "image_url":
parsed = _parse_data_uri(block["image_url"]["url"])
if parsed is not None:
parsed["type"] = "image"
return parsed
return block
if block["type"] == "file":
parsed = _parse_data_uri(block["file"]["file_data"])
if parsed is not None:
parsed["type"] = "file"
if filename := block["file"].get("filename"):
parsed["filename"] = filename
return parsed
return block
if block["type"] == "input_audio":
data = block["input_audio"].get("data")
audio_format = block["input_audio"].get("format")
if data and audio_format:
return {
"type": "audio",
"source_type": "base64",
"data": data,
"mime_type": f"audio/{audio_format}",
}
return block
return block
def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
"""Extend support for message formats.
Chat models implement support for images in OpenAI Chat Completions format, as well
as other multimodal data as standard data blocks. This function extends support to
audio and file data in OpenAI Chat Completions format by converting them to standard
data blocks.
"""
formatted_messages = []
for message in messages:
formatted_message = message
if isinstance(message.content, list):
for idx, block in enumerate(message.content):
if (
isinstance(block, dict)
# Subset to (PDF) files and audio, as most relevant chat models
# support images in OAI format (and some may not yet support the
# standard data block format)
and block.get("type") in {"file", "input_audio"}
and _is_openai_data_block(block)
):
if formatted_message is message:
formatted_message = message.model_copy()
# Also shallow-copy content
formatted_message.content = list(formatted_message.content)
formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
_convert_openai_format_to_data_block(block)
)
formatted_messages.append(formatted_message)
return formatted_messages
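# Illustrative sketch (not part of the original module; the base64 payload is a placeholder):
# an OpenAI Chat Completions style input_audio block inside a message is rewritten into a
# standard data block, while image_url blocks are intentionally left for the chat model.
if __name__ == "__main__":
    from langchain_core.messages import HumanMessage

    message = HumanMessage(
        content=[
            {"type": "input_audio", "input_audio": {"data": "aGVsbG8=", "format": "wav"}},
        ]
    )
    normalized = _normalize_messages([message])[0]
    print(normalized.content[0])
    # {'type': 'audio', 'source_type': 'base64', 'data': 'aGVsbG8=', 'mime_type': 'audio/wav'}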
|
import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
(set(block.keys()) <= {"type", "image_url", "detail"})
and (image_url := block.get("image_url"))
and isinstance(image_url, dict)
):
url = image_url.get("url")
if isinstance(url, str):
return True
elif block.get("type") == "file":
if (file := block.get("file")) and isinstance(file, dict):
file_data = file.get("file_data")
if isinstance(file_data, str):
return True
elif block.get("type") == "input_audio":
if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
audio_data = input_audio.get("data")
audio_format = input_audio.get("format")
if isinstance(audio_data, str) and isinstance(audio_format, str):
return True
else:
return False
return False
def _parse_data_uri(uri: str) -> Optional[dict]:
"""Parse a data URI into its components. If parsing fails, return None.
Example:
.. code-block:: python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)
assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
if match is None:
return None
return {
"source_type": "base64",
"data": match.group("data"),
"mime_type": match.group("mime_type"),
}
def _convert_openai_format_to_data_block(block: dict) -> dict:
"""Convert OpenAI image content block to standard data content block.
If parsing fails, pass-through.
Args:
block: The OpenAI image content block to convert.
Returns:
The converted standard data content block.
"""
if block["type"] == "image_url":
parsed = _parse_data_uri(block["image_url"]["url"])
if parsed is not None:
parsed["type"] = "image"
return parsed
return block
if block["type"] == "file":
parsed = _parse_data_uri(block["file"]["file_data"])
if parsed is not None:
parsed["type"] = "file"
if filename := block["file"].get("filename"):
parsed["filename"] = filename
return parsed
return block
if block["type"] == "input_audio":
data = block["input_audio"].get("data")
audio_format = block["input_audio"].get("format")
if data and audio_format:
return {
"type": "audio",
"source_type": "base64",
"data": data,
"mime_type": f"audio/{audio_format}",
}
return block
return block
def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
"""Extend support for message formats.
Chat models implement support for images in OpenAI Chat Completions format, as well
as other multimodal data as standard data blocks. This function extends support to
audio and file data in OpenAI Chat Completions format by converting them to standard
data blocks.
"""
formatted_messages = []
for message in messages:
formatted_message = message
if isinstance(message.content, list):
for idx, block in enumerate(message.content):
if (
isinstance(block, dict)
# Subset to (PDF) files and audio, as most relevant chat models
# support images in OAI format (and some may not yet support the
# standard data block format)
and block.get("type") in ("file", "input_audio")
and _is_openai_data_block(block)
):
if formatted_message is message:
formatted_message = message.model_copy()
# Also shallow-copy content
formatted_message.content = list(formatted_message.content)
formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
_convert_openai_format_to_data_block(block)
)
formatted_messages.append(formatted_message)
return formatted_messages
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.7"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.6"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
from torio.io import CodecConfig, StreamingMediaDecoder as StreamReader, StreamingMediaEncoder as StreamWriter
from torchaudio._internal.module_utils import dropping_support
from ._effector import AudioEffector
from ._playback import play_audio as _play_audio
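# The assignments below re-wrap the constructors with dropping_support so that, presumably,
# instantiating these streaming I/O classes (or calling play_audio) emits torchaudio's
# "support will be dropped" deprecation notice while keeping the public API unchanged.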
CodecConfig.__init__ = dropping_support(CodecConfig.__init__)
StreamReader.__init__ = dropping_support(StreamReader.__init__)
StreamWriter.__init__ = dropping_support(StreamWriter.__init__)
AudioEffector.__init__ = dropping_support(AudioEffector.__init__)
play_audio = dropping_support(_play_audio)
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
from torio.io import CodecConfig, StreamingMediaDecoder as StreamReader, StreamingMediaEncoder as StreamWriter
from ._effector import AudioEffector
from ._playback import play_audio
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
_base_ = './solo_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
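# Note: {{_base_.backend_args}} above is mmengine's cross-config interpolation; it is replaced
# at parse time with the backend_args value defined in the inherited _base_ config.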
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = './solo_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MetalReader(BaseReader):
"""
Metal reader.
Args:
api_key (str): Metal API key.
client_id (str): Metal client ID.
index_id (str): Metal index ID.
"""
def __init__(self, api_key: str, client_id: str, index_id: str):
"""Initialize with parameters."""
import_err_msg = (
"`metal_sdk` package not found, please run `pip install metal_sdk`"
)
try:
import metal_sdk # noqa
except ImportError:
raise ImportError(import_err_msg)
from metal_sdk.metal import Metal
"""Initialize with parameters."""
self._api_key = api_key
self._client_id = client_id
self._index_id = index_id
self.metal_client = Metal(api_key, client_id, index_id)
def load_data(
self,
limit: int,
query_embedding: Optional[List[float]] = None,
filters: Optional[Dict[str, Any]] = None,
separate_documents: bool = True,
**query_kwargs: Any
) -> List[Document]:
"""
Load data from Metal.
Args:
query_embedding (Optional[List[float]]): Query embedding for search.
limit (int): Number of results to return.
filters (Optional[Dict[str, Any]]): Filters to apply to the search.
separate_documents (Optional[bool]): Whether to return separate
documents per retrieved entry. Defaults to True.
**query_kwargs: Keyword arguments to pass to the search.
Returns:
List[Document]: A list of documents.
"""
payload = {
"embedding": query_embedding,
"filters": filters,
}
response = self.metal_client.search(payload, limit=limit, **query_kwargs)
documents = []
for item in response["data"]:
text = item["text"] or (item["metadata"] and item["metadata"]["text"])
documents.append(Document(text=text))
if not separate_documents:
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
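# A minimal usage sketch, assuming the placeholder credentials and the
# query-embedding length below are illustrative only and must match your own Metal index:
#
# reader = MetalReader(api_key="<metal-api-key>", client_id="<metal-client-id>", index_id="<metal-index-id>")
# docs = reader.load_data(limit=3, query_embedding=[0.0] * 1536)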
|
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MetalReader(BaseReader):
"""Metal reader.
Args:
api_key (str): Metal API key.
client_id (str): Metal client ID.
index_id (str): Metal index ID.
"""
def __init__(self, api_key: str, client_id: str, index_id: str):
"""Initialize with parameters."""
import_err_msg = (
"`metal_sdk` package not found, please run `pip install metal_sdk`"
)
try:
import metal_sdk # noqa
except ImportError:
raise ImportError(import_err_msg)
from metal_sdk.metal import Metal
"""Initialize with parameters."""
self._api_key = api_key
self._client_id = client_id
self._index_id = index_id
self.metal_client = Metal(api_key, client_id, index_id)
def load_data(
self,
limit: int,
query_embedding: Optional[List[float]] = None,
filters: Optional[Dict[str, Any]] = None,
separate_documents: bool = True,
**query_kwargs: Any
) -> List[Document]:
"""Load data from Metal.
Args:
query_embedding (Optional[List[float]]): Query embedding for search.
limit (int): Number of results to return.
filters (Optional[Dict[str, Any]]): Filters to apply to the search.
separate_documents (Optional[bool]): Whether to return separate
documents per retrieved entry. Defaults to True.
**query_kwargs: Keyword arguments to pass to the search.
Returns:
List[Document]: A list of documents.
"""
payload = {
"embedding": query_embedding,
"filters": filters,
}
response = self.metal_client.search(payload, limit=limit, **query_kwargs)
documents = []
for item in response["data"]:
text = item["text"] or (item["metadata"] and item["metadata"]["text"])
documents.append(Document(text=text))
if not separate_documents:
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.computation.torch_backend import TorchCompBackend
np_metrics = NumpyCompBackend.Metrics
torch_metrics = TorchCompBackend.Metrics
def test_cosine_sim_compare():
a = torch.rand(128)
b = torch.rand(128)
torch.testing.assert_close(
torch_metrics.cosine_sim(a, b),
torch.from_numpy(np_metrics.cosine_sim(a.numpy(), b.numpy())),
)
a = torch.rand(10, 3)
b = torch.rand(5, 3)
torch.testing.assert_close(
torch_metrics.cosine_sim(a, b),
torch.from_numpy(np_metrics.cosine_sim(a.numpy(), b.numpy())),
)
def test_euclidean_dist_compare():
a = torch.rand(128)
b = torch.rand(128)
torch.testing.assert_close(
torch_metrics.euclidean_dist(a, b),
torch.from_numpy(np_metrics.euclidean_dist(a.numpy(), b.numpy())).to(
torch.float32
),
)
a = torch.rand(10, 3)
b = torch.rand(5, 3)
torch.testing.assert_close(
torch_metrics.euclidean_dist(a, b),
torch.from_numpy(np_metrics.euclidean_dist(a.numpy(), b.numpy())),
)
def test_sqeuclidean_dist_compare():
a = torch.rand(128)
b = torch.rand(128)
torch.testing.assert_close(
torch_metrics.sqeuclidean_dist(a, b),
torch.from_numpy(np_metrics.sqeuclidean_dist(a.numpy(), b.numpy())).to(
torch.float32
),
)
a = torch.rand(10, 3)
b = torch.rand(5, 3)
torch.testing.assert_close(
torch_metrics.sqeuclidean_dist(a, b),
torch.from_numpy(np_metrics.sqeuclidean_dist(a.numpy(), b.numpy())),
)
|
import torch
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.computation.torch_backend import TorchCompBackend
np_metrics = NumpyCompBackend.Metrics
torch_metrics = TorchCompBackend.Metrics
def test_cosine_sim_compare():
a = torch.rand(128)
b = torch.rand(128)
torch.testing.assert_close(
torch_metrics.cosine_sim(a, b),
torch.from_numpy(np_metrics.cosine_sim(a.numpy(), b.numpy())),
)
a = torch.rand(10, 3)
b = torch.rand(5, 3)
torch.testing.assert_close(
torch_metrics.cosine_sim(a, b),
torch.from_numpy(np_metrics.cosine_sim(a.numpy(), b.numpy())),
)
def test_euclidean_dist_compare():
a = torch.rand(128)
b = torch.rand(128)
torch.testing.assert_close(
torch_metrics.euclidean_dist(a, b),
torch.from_numpy(np_metrics.euclidean_dist(a.numpy(), b.numpy())).to(
torch.float32
),
)
a = torch.rand(10, 3)
b = torch.rand(5, 3)
torch.testing.assert_close(
torch_metrics.euclidean_dist(a, b),
torch.from_numpy(np_metrics.euclidean_dist(a.numpy(), b.numpy())),
)
def test_sqeuclidean_dist_compare():
a = torch.rand(128)
b = torch.rand(128)
torch.testing.assert_close(
torch_metrics.sqeuclidean_dist(a, b),
torch.from_numpy(np_metrics.sqeuclidean_dist(a.numpy(), b.numpy())).to(
torch.float32
),
)
a = torch.rand(10, 3)
b = torch.rand(5, 3)
torch.testing.assert_close(
torch_metrics.sqeuclidean_dist(a, b),
torch.from_numpy(np_metrics.sqeuclidean_dist(a.numpy(), b.numpy())),
)
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol, return_responses=True).index(
inputs=(Document() for _ in range(256)),
_size=16,
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
The margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0).
The margin is an important hyperparameter and needs to be tuned accordingly.
For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
:param model: SentenceTransformerModel
:param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric contains common distance metrics that can be used.
:param triplet_margin: The negative should be at least this much further away from the anchor than the positive.
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),
InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2'])]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5):
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {'distance_metric': distance_metric_name, 'triplet_margin': self.triplet_margin}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
|
"""init.py."""
from llama_index.tools.chatgpt_plugin.base import (
ChatGPTPluginToolSpec,
)
__all__ = ["ChatGPTPluginToolSpec"]
|
"""init.py."""
from llama_index.tools.chatgpt_plugin.base import (
ChatGPTPluginToolSpec,
)
__all__ = ["ChatGPTPluginToolSpec"]
|
_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
"""
This file is part of the private API. Please do not use directly these classes as they will be modified on
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import List, Optional, Tuple, Union
import PIL.Image
import torch
from torch import Tensor
from . import functional as F, InterpolationMode
__all__ = ["StereoMatching"]
class StereoMatching(torch.nn.Module):
def __init__(
self,
*,
use_gray_scale: bool = False,
resize_size: Optional[Tuple[int, ...]],
mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
std: Tuple[float, ...] = (0.5, 0.5, 0.5),
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
# pacify mypy
self.resize_size: Union[None, List]
if resize_size is not None:
self.resize_size = list(resize_size)
else:
self.resize_size = None
self.mean = list(mean)
self.std = list(std)
self.interpolation = interpolation
self.use_gray_scale = use_gray_scale
def forward(self, left_image: Tensor, right_image: Tensor) -> Tuple[Tensor, Tensor]:
def _process_image(img: PIL.Image.Image) -> Tensor:
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
if self.resize_size is not None:
# We hard-code antialias=False to preserve results after we changed
# its default from None to True (see
# https://github.com/pytorch/vision/pull/7160)
# TODO: we could re-train the stereo models with antialias=True?
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=False)
if self.use_gray_scale is True:
img = F.rgb_to_grayscale(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
img = img.contiguous()
return img
left_image = _process_image(left_image)
right_image = _process_image(right_image)
return left_image, right_image
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
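# A minimal sketch of applying the transform, assuming two random 3x256x256
# tensors stand in for a rectified stereo pair (illustrative only):
#
# transform = StereoMatching(resize_size=(224, 224))
# left, right = transform(torch.rand(3, 256, 256), torch.rand(3, 256, 256))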
|
"""
This file is part of the private API. Please do not use directly these classes as they will be modified on
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import List, Optional, Tuple, Union
import PIL.Image
import torch
from torch import Tensor
from . import functional as F, InterpolationMode
__all__ = ["StereoMatching"]
class StereoMatching(torch.nn.Module):
def __init__(
self,
*,
use_gray_scale: bool = False,
resize_size: Optional[Tuple[int, ...]],
mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
std: Tuple[float, ...] = (0.5, 0.5, 0.5),
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
# pacify mypy
self.resize_size: Union[None, List]
if resize_size is not None:
self.resize_size = list(resize_size)
else:
self.resize_size = None
self.mean = list(mean)
self.std = list(std)
self.interpolation = interpolation
self.use_gray_scale = use_gray_scale
def forward(self, left_image: Tensor, right_image: Tensor) -> Tuple[Tensor, Tensor]:
def _process_image(img: PIL.Image.Image) -> Tensor:
if self.resize_size is not None:
img = F.resize(img, self.resize_size, interpolation=self.interpolation)
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
if self.use_gray_scale is True:
img = F.rgb_to_grayscale(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
img = img.contiguous()
return img
left_image = _process_image(left_image)
right_image = _process_image(right_image)
return left_image, right_image
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
|
import pytest
from langchain_core.memory import BaseMemory
from langchain.chains.conversation.memory import (
ConversationBufferMemory,
ConversationBufferWindowMemory,
ConversationSummaryMemory,
)
from langchain.memory import ReadOnlySharedMemory, SimpleMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_simple_memory() -> None:
"""Test SimpleMemory."""
memory = SimpleMemory(memories={"baz": "foo"})
output = memory.load_memory_variables({})
assert output == {"baz": "foo"}
assert memory.memory_variables == ["baz"]
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
],
)
def test_readonly_memory(memory: BaseMemory) -> None:
read_only_memory = ReadOnlySharedMemory(memory=memory)
memory.save_context({"input": "bar"}, {"output": "foo"})
assert read_only_memory.load_memory_variables({}) == memory.load_memory_variables(
{},
)
|
import pytest
from langchain_core.memory import BaseMemory
from langchain.chains.conversation.memory import (
ConversationBufferMemory,
ConversationBufferWindowMemory,
ConversationSummaryMemory,
)
from langchain.memory import ReadOnlySharedMemory, SimpleMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_simple_memory() -> None:
"""Test SimpleMemory."""
memory = SimpleMemory(memories={"baz": "foo"})
output = memory.load_memory_variables({})
assert output == {"baz": "foo"}
assert memory.memory_variables == ["baz"]
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
],
)
def test_readonly_memory(memory: BaseMemory) -> None:
read_only_memory = ReadOnlySharedMemory(memory=memory)
memory.save_context({"input": "bar"}, {"output": "foo"})
assert read_only_memory.load_memory_variables({}) == memory.load_memory_variables(
{}
)
|
"""
Manages process groups for distributed compilation in TorchDynamo.
This module handles the initialization and management of process groups used for
distributed compilation. Key features:
- Lazy initialization of compilation process groups
- Only creates groups when distributed mode is enabled and available
- Integrates with compiler_collectives configuration setting
- Provides a single global process group for compilation coordination
The process group is created only when needed and if the distributed environment
is properly initialized, making it safe to import and use this module even in
non-distributed scenarios.
"""
from typing import Optional
import torch.distributed as dist
from . import config
_COMPILE_PG: Optional[dist.ProcessGroup] = None
_GUARD_PG: Optional[dist.ProcessGroup] = None
def get_compile_pg() -> Optional[dist.ProcessGroup]:
if (
config.enable_compiler_collectives
and dist.is_available()
and dist.is_initialized()
):
global _COMPILE_PG
if _COMPILE_PG is None:
# , timeout=datetime.timedelta(seconds=2)
_COMPILE_PG = dist.distributed_c10d._new_group_with_tag(
pg_tag="pt2_compile_pg"
)
return _COMPILE_PG
return None
# NB: Unlike get_compile_pg, this is only called when guard collectives were
# explicitly requested
def get_guard_pg() -> Optional[dist.ProcessGroup]:
if dist.is_available() and dist.is_initialized():
global _GUARD_PG
if _GUARD_PG is None:
_GUARD_PG = dist.distributed_c10d._new_group_with_tag(pg_tag="pt2_guard_pg")
return _GUARD_PG
return None
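# A minimal sketch of how a caller might coordinate work over the compile
# process group, assuming torch.distributed is already initialized and
# `payload` is a picklable per-rank object (illustrative only):
#
# compile_pg = get_compile_pg()
# if compile_pg is not None:
#     gathered = [None] * compile_pg.size()
#     dist.all_gather_object(gathered, payload, group=compile_pg)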
|
"""
Manages process groups for distributed compilation in TorchDynamo.
This module handles the initialization and management of process groups used for
distributed compilation. Key features:
- Lazy initialization of compilation process groups
- Only creates groups when distributed mode is enabled and available
- Integrates with compiler_collectives configuration setting
- Provides a single global process group for compilation coordination
The process group is created only when needed and if the distributed environment
is properly initialized, making it safe to import and use this module even in
non-distributed scenarios.
"""
from typing import Optional
import torch.distributed as dist
from . import config
_COMPILE_PG: Optional[dist.ProcessGroup] = None
def get_compile_pg() -> Optional[dist.ProcessGroup]:
if (
config.enable_compiler_collectives
and dist.is_available()
and dist.is_initialized()
):
global _COMPILE_PG
if _COMPILE_PG is None:
# , timeout=datetime.timedelta(seconds=2)
_COMPILE_PG = dist.distributed_c10d._new_group_with_tag(
pg_tag="pt2_compile_pg"
)
return _COMPILE_PG
return None
|
import unittest
import torch
from mmengine.structures import PixelData
from mmengine.testing import assert_allclose
from mmdet.models.seg_heads import PanopticFPNHead
from mmdet.structures import DetDataSample
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=1,
inner_channels=1)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
import unittest
import torch
from mmengine.data import PixelData
from mmengine.testing import assert_allclose
from mmdet.models.seg_heads import PanopticFPNHead
from mmdet.structures import DetDataSample
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=1,
inner_channels=1)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
import io
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
:param format: the image format used to store the image, e.g. 'PNG', 'JPG', ...
:return: bytes
"""
from PIL import Image
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = Image.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
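# A minimal sketch of the expected call pattern, assuming a NumPy-backed
# concrete subclass such as ImageNdArray exists and is importable from
# docarray.typing (illustrative only):
#
# import numpy as np
# from pydantic import parse_obj_as
# from docarray.typing import ImageNdArray
# tensor = parse_obj_as(ImageNdArray, np.zeros((32, 32, 3), dtype=np.uint8))
# png_bytes = tensor.to_bytes(format='PNG')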
|
import io
from abc import ABC, abstractmethod
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class AbstractImageTensor(AbstractTensor, ABC):
@abstractmethod
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
:param format: the image format used to store the image, e.g. 'PNG', 'JPG', ...
:return: bytes
"""
from PIL import Image
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = Image.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
|
from typing import Dict, Set
from fastapi import WebSocket
from backend.data.execution import (
ExecutionEventType,
GraphExecutionEvent,
NodeExecutionEvent,
)
from backend.server.model import WSMessage, WSMethod
_EVENT_TYPE_TO_METHOD_MAP: dict[ExecutionEventType, WSMethod] = {
ExecutionEventType.GRAPH_EXEC_UPDATE: WSMethod.GRAPH_EXECUTION_EVENT,
ExecutionEventType.NODE_EXEC_UPDATE: WSMethod.NODE_EXECUTION_EVENT,
}
class ConnectionManager:
def __init__(self):
self.active_connections: Set[WebSocket] = set()
self.subscriptions: Dict[str, Set[WebSocket]] = {}
async def connect_socket(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.add(websocket)
def disconnect_socket(self, websocket: WebSocket):
self.active_connections.remove(websocket)
for subscribers in self.subscriptions.values():
subscribers.discard(websocket)
async def subscribe_graph_exec(
self, *, user_id: str, graph_exec_id: str, websocket: WebSocket
) -> str:
key = _graph_exec_channel_key(user_id, graph_exec_id)
if key not in self.subscriptions:
self.subscriptions[key] = set()
self.subscriptions[key].add(websocket)
return key
async def unsubscribe(
self, *, user_id: str, graph_exec_id: str, websocket: WebSocket
) -> str | None:
key = _graph_exec_channel_key(user_id, graph_exec_id)
if key in self.subscriptions:
self.subscriptions[key].discard(websocket)
if not self.subscriptions[key]:
del self.subscriptions[key]
return key
return None
async def send_execution_update(
self, exec_event: GraphExecutionEvent | NodeExecutionEvent
) -> int:
graph_exec_id = (
exec_event.id
if isinstance(exec_event, GraphExecutionEvent)
else exec_event.graph_exec_id
)
key = _graph_exec_channel_key(exec_event.user_id, graph_exec_id)
n_sent = 0
if key in self.subscriptions:
message = WSMessage(
method=_EVENT_TYPE_TO_METHOD_MAP[exec_event.event_type],
channel=key,
data=exec_event.model_dump(),
).model_dump_json()
for connection in self.subscriptions[key]:
await connection.send_text(message)
n_sent += 1
return n_sent
def _graph_exec_channel_key(user_id: str, graph_exec_id: str) -> str:
return f"{user_id}|graph_exec#{graph_exec_id}"
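# A minimal sketch of the intended flow, assuming `ws` is a FastAPI WebSocket
# handed to an endpoint and `event` is a GraphExecutionEvent for that user
# and execution (illustrative only):
#
# manager = ConnectionManager()
# await manager.connect_socket(ws)
# await manager.subscribe_graph_exec(user_id="user-1", graph_exec_id="exec-1", websocket=ws)
# await manager.send_execution_update(event)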
|
from typing import Dict, Set
from fastapi import WebSocket
from backend.data import execution
from backend.server.model import Methods, WsMessage
class ConnectionManager:
def __init__(self):
self.active_connections: Set[WebSocket] = set()
self.subscriptions: Dict[str, Set[WebSocket]] = {}
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.add(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
for subscribers in self.subscriptions.values():
subscribers.discard(websocket)
async def subscribe(
self, *, user_id: str, graph_id: str, graph_version: int, websocket: WebSocket
):
key = f"{user_id}_{graph_id}_{graph_version}"
if key not in self.subscriptions:
self.subscriptions[key] = set()
self.subscriptions[key].add(websocket)
async def unsubscribe(
self, *, user_id: str, graph_id: str, graph_version: int, websocket: WebSocket
):
key = f"{user_id}_{graph_id}_{graph_version}"
if key in self.subscriptions:
self.subscriptions[key].discard(websocket)
if not self.subscriptions[key]:
del self.subscriptions[key]
async def send_execution_result(self, result: execution.ExecutionResult):
key = f"{result.user_id}_{result.graph_id}_{result.graph_version}"
if key in self.subscriptions:
message = WsMessage(
method=Methods.EXECUTION_EVENT,
channel=key,
data=result.model_dump(),
).model_dump_json()
for connection in self.subscriptions[key]:
await connection.send_text(message)
|
from __future__ import annotations
import os
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from __future__ import annotations
import os
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
rpn_head=dict(
anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5),
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=14,
sampling_ratio=2,
aligned=False)),
bbox_head=dict(
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn_proposal=dict(max_per_img=2000),
rcnn=dict(assigner=dict(match_low_quality=True))))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
rpn_head=dict(
anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5),
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=14,
sampling_ratio=2,
aligned=False)),
bbox_head=dict(
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn_proposal=dict(max_per_img=2000),
rcnn=dict(assigner=dict(match_low_quality=True))))
|
import os
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(ROOT_DIR, "utils"))
import create_dependency_mapping # noqa: E402
# This is equivalent to `all` in the current library state (as of 09/01/2025)
MODEL_ROOT = os.path.join("src", "transformers", "models")
FILES_TO_PARSE = [
os.path.join(MODEL_ROOT, "starcoder2", "modular_starcoder2.py"),
os.path.join(MODEL_ROOT, "gemma", "modular_gemma.py"),
os.path.join(MODEL_ROOT, "olmo2", "modular_olmo2.py"),
os.path.join(MODEL_ROOT, "diffllama", "modular_diffllama.py"),
os.path.join(MODEL_ROOT, "granite", "modular_granite.py"),
os.path.join(MODEL_ROOT, "gemma2", "modular_gemma2.py"),
os.path.join(MODEL_ROOT, "mixtral", "modular_mixtral.py"),
os.path.join(MODEL_ROOT, "olmo", "modular_olmo.py"),
os.path.join(MODEL_ROOT, "rt_detr", "modular_rt_detr.py"),
os.path.join(MODEL_ROOT, "qwen2", "modular_qwen2.py"),
os.path.join(MODEL_ROOT, "qwen3", "modular_qwen3.py"),
os.path.join(MODEL_ROOT, "llava_next_video", "modular_llava_next_video.py"),
os.path.join(MODEL_ROOT, "cohere2", "modular_cohere2.py"),
os.path.join(MODEL_ROOT, "modernbert", "modular_modernbert.py"),
os.path.join(MODEL_ROOT, "colpali", "modular_colpali.py"),
os.path.join(MODEL_ROOT, "deformable_detr", "modular_deformable_detr.py"),
os.path.join(MODEL_ROOT, "aria", "modular_aria.py"),
os.path.join(MODEL_ROOT, "ijepa", "modular_ijepa.py"),
os.path.join(MODEL_ROOT, "bamba", "modular_bamba.py"),
os.path.join(MODEL_ROOT, "dinov2_with_registers", "modular_dinov2_with_registers.py"),
os.path.join(MODEL_ROOT, "instructblipvideo", "modular_instructblipvideo.py"),
os.path.join(MODEL_ROOT, "glm", "modular_glm.py"),
os.path.join(MODEL_ROOT, "phi", "modular_phi.py"),
os.path.join(MODEL_ROOT, "mistral", "modular_mistral.py"),
os.path.join(MODEL_ROOT, "phi3", "modular_phi3.py"),
os.path.join(MODEL_ROOT, "cohere", "modular_cohere.py"),
os.path.join(MODEL_ROOT, "glm4", "modular_glm4.py"),
]
def appear_after(model1: str, model2: str, priority_list: list[list[str]]) -> bool:
"""Return True if `model1` appear after `model2` in `priority_list`."""
model1_index, model2_index = None, None
for i, level in enumerate(priority_list):
if model1 in level:
model1_index = i
if model2 in level:
model2_index = i
if model1_index is None or model2_index is None:
raise ValueError(f"Model {model1} or {model2} not found in {priority_list}")
return model1_index > model2_index
class ConversionOrderTest(unittest.TestCase):
def test_conversion_order(self):
# Find the order
priority_list, _ = create_dependency_mapping.find_priority_list(FILES_TO_PARSE)
# Extract just the model names (list of lists)
model_priority_list = [[file.split("/")[-2] for file in level] for level in priority_list]
# These are based on what the current library order should be (as of 09/01/2025)
self.assertTrue(appear_after("mixtral", "mistral", model_priority_list))
self.assertTrue(appear_after("gemma2", "gemma", model_priority_list))
self.assertTrue(appear_after("starcoder2", "mistral", model_priority_list))
self.assertTrue(appear_after("olmo2", "olmo", model_priority_list))
self.assertTrue(appear_after("diffllama", "mistral", model_priority_list))
self.assertTrue(appear_after("cohere2", "gemma2", model_priority_list))
self.assertTrue(appear_after("cohere2", "cohere", model_priority_list))
self.assertTrue(appear_after("phi3", "mistral", model_priority_list))
self.assertTrue(appear_after("glm4", "glm", model_priority_list))
|
import os
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(ROOT_DIR, "utils"))
import create_dependency_mapping # noqa: E402
# This is equivalent to `all` in the current library state (as of 09/01/2025)
MODEL_ROOT = os.path.join("src", "transformers", "models")
FILES_TO_PARSE = [
os.path.join(MODEL_ROOT, "starcoder2", "modular_starcoder2.py"),
os.path.join(MODEL_ROOT, "gemma", "modular_gemma.py"),
os.path.join(MODEL_ROOT, "olmo2", "modular_olmo2.py"),
os.path.join(MODEL_ROOT, "diffllama", "modular_diffllama.py"),
os.path.join(MODEL_ROOT, "granite", "modular_granite.py"),
os.path.join(MODEL_ROOT, "gemma2", "modular_gemma2.py"),
os.path.join(MODEL_ROOT, "mixtral", "modular_mixtral.py"),
os.path.join(MODEL_ROOT, "olmo", "modular_olmo.py"),
os.path.join(MODEL_ROOT, "rt_detr", "modular_rt_detr.py"),
os.path.join(MODEL_ROOT, "qwen2", "modular_qwen2.py"),
os.path.join(MODEL_ROOT, "qwen3", "modular_qwen3.py"),
os.path.join(MODEL_ROOT, "llava_next_video", "modular_llava_next_video.py"),
os.path.join(MODEL_ROOT, "cohere2", "modular_cohere2.py"),
os.path.join(MODEL_ROOT, "modernbert", "modular_modernbert.py"),
os.path.join(MODEL_ROOT, "colpali", "modular_colpali.py"),
os.path.join(MODEL_ROOT, "deformable_detr", "modular_deformable_detr.py"),
os.path.join(MODEL_ROOT, "aria", "modular_aria.py"),
os.path.join(MODEL_ROOT, "ijepa", "modular_ijepa.py"),
os.path.join(MODEL_ROOT, "bamba", "modular_bamba.py"),
os.path.join(MODEL_ROOT, "dinov2_with_registers", "modular_dinov2_with_registers.py"),
os.path.join(MODEL_ROOT, "instructblipvideo", "modular_instructblipvideo.py"),
os.path.join(MODEL_ROOT, "glm", "modular_glm.py"),
os.path.join(MODEL_ROOT, "phi", "modular_phi.py"),
os.path.join(MODEL_ROOT, "mistral", "modular_mistral.py"),
os.path.join(MODEL_ROOT, "phi3", "modular_phi3.py"),
os.path.join(MODEL_ROOT, "cohere", "modular_cohere.py"),
]
def appear_after(model1: str, model2: str, priority_list: list[str]) -> bool:
"""Return True if `model1` appear after `model2` in `priority_list`."""
return priority_list.index(model1) > priority_list.index(model2)
class ConversionOrderTest(unittest.TestCase):
def test_conversion_order(self):
# Find the order
priority_list, _ = create_dependency_mapping.find_priority_list(FILES_TO_PARSE)
# Extract just the model names
model_priority_list = [file.rsplit("modular_")[-1].replace(".py", "") for file in priority_list]
# These are based on what the current library order should be (as of 09/01/2025)
self.assertTrue(appear_after("mixtral", "mistral", model_priority_list))
self.assertTrue(appear_after("gemma2", "gemma", model_priority_list))
self.assertTrue(appear_after("starcoder2", "mistral", model_priority_list))
self.assertTrue(appear_after("olmo2", "olmo", model_priority_list))
self.assertTrue(appear_after("diffllama", "mistral", model_priority_list))
self.assertTrue(appear_after("cohere2", "gemma2", model_priority_list))
self.assertTrue(appear_after("cohere2", "cohere", model_priority_list))
self.assertTrue(appear_after("phi3", "mistral", model_priority_list))
|
"""Integration test for SerpAPI."""
from langchain_community.utilities import SerpAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
chain = SerpAPIWrapper()
output = chain.run("What was Obama's first name?")
assert output == "Barack Hussein Obama II"
|
"""Integration test for SerpAPI."""
from langchain_community.utilities import SerpAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
chain = SerpAPIWrapper() # type: ignore[call-arg]
output = chain.run("What was Obama's first name?")
assert output == "Barack Hussein Obama II"
|
from typing import Dict, List, Optional, Callable
from jina.importer import ImportExtensions
from jina.types.request.data import DataRequest
from jina import DocumentArray
from jina._docarray import docarray_v2
if docarray_v2:
from docarray import DocList
def get_fastapi_app(
request_models_map: Dict,
caller: Callable,
**kwargs
):
"""
Get the app from FastAPI as the REST interface.
:param request_models_map: Map describing the endpoints and its Pydantic models
:param caller: Callable to be handled by the endpoints of the returned FastAPI app
:param kwargs: Extra kwargs to make it compatible with other methods
:return: fastapi app
"""
with ImportExtensions(required=True):
from fastapi import FastAPI, Response, HTTPException
import pydantic
from jina.proto import jina_pb2
app = FastAPI()
def add_route(endpoint_path, input_model, output_model, input_doc_list_model=None, output_doc_list_model=None):
app_kwargs = dict(path=f'/{endpoint_path.strip("/")}',
methods=['POST'],
summary=f'Endpoint {endpoint_path}',
response_model=output_model, )
if docarray_v2:
from docarray.base_doc.docarray_response import DocArrayResponse
app_kwargs['response_class'] = DocArrayResponse
@app.api_route(
**app_kwargs
)
async def post(body: input_model, response: Response):
req = DataRequest()
if not docarray_v2:
req.data.docs = DocumentArray.from_pydantic_model(body.data)
else:
req.data.docs = DocList[input_doc_list_model](body.data)
req.parameters = body.parameters
req.header.exec_endpoint = endpoint_path
resp = await caller(req)
status = resp.header.status
if status.code == jina_pb2.StatusProto.ERROR:
raise HTTPException(status_code=499, detail=status.description)
else:
if not docarray_v2:
docs_response = resp.docs.to_dict()
else:
docs_response = resp.docs._data
ret = output_model(data=docs_response, parameters=resp.parameters)
return ret
for endpoint, input_output_map in request_models_map.items():
if endpoint != '_jina_dry_run_':
input_doc_model = input_output_map['input']['model']
output_doc_model = input_output_map['output']['model']
endpoint_input_model = pydantic.create_model(
f'{endpoint.strip("/")}_input_model',
data=(List[input_doc_model], []),
parameters=(Optional[Dict], None),
__config__=input_doc_model.__config__
)
endpoint_output_model = pydantic.create_model(
f'{endpoint.strip("/")}_output_model',
data=(List[output_doc_model], []),
parameters=(Optional[Dict], None),
__config__=output_doc_model.__config__
)
add_route(endpoint,
input_model=endpoint_input_model,
output_model=endpoint_output_model,
input_doc_list_model=input_doc_model,
output_doc_list_model=output_doc_model)
from jina.serve.runtimes.gateway.health_model import JinaHealthModel
@app.get(
path='/',
summary='Get the health of Jina Executor service',
response_model=JinaHealthModel,
)
async def _executor_health():
"""
Get the health of this Gateway service.
.. # noqa: DAR201
"""
return {}
return app
|
from typing import Dict, List, Optional, Callable
from jina.importer import ImportExtensions
from jina.types.request.data import DataRequest
from jina import DocumentArray
from jina._docarray import docarray_v2
if docarray_v2:
from docarray import DocList
def get_fastapi_app(
request_models_map: Dict,
caller: Callable,
**kwargs
):
"""
Get the app from FastAPI as the REST interface.
    :param request_models_map: Map describing the endpoints and their Pydantic models
:param caller: Callable to be handled by the endpoints of the returned FastAPI app
:param kwargs: Extra kwargs to make it compatible with other methods
:return: fastapi app
"""
with ImportExtensions(required=True):
from fastapi import FastAPI, Response, HTTPException
import pydantic
from jina.proto import jina_pb2
app = FastAPI()
def add_route(endpoint_path, input_model, output_model, input_doc_list_model=None, output_doc_list_model=None):
@app.api_route(
path=f'/{endpoint_path.strip("/")}',
methods=['POST'],
summary=f'Endpoint {endpoint_path}',
response_model=output_model
)
async def post(body: input_model, response: Response):
req = DataRequest()
if not docarray_v2:
req.data.docs = DocumentArray.from_pydantic_model(body.data)
else:
req.data.docs = DocList[input_doc_list_model](body.data)
req.parameters = body.parameters
req.header.exec_endpoint = endpoint_path
resp = await caller(req)
status = resp.header.status
if status.code == jina_pb2.StatusProto.ERROR:
raise HTTPException(status_code=499, detail=status.description)
else:
if not docarray_v2:
docs_response = resp.docs.to_dict()
else:
docs_response = resp.docs._data
return output_model(data=docs_response, parameters=resp.parameters)
for endpoint, input_output_map in request_models_map.items():
if endpoint != '_jina_dry_run_':
input_doc_model = input_output_map['input']['model']
output_doc_model = input_output_map['output']['model']
endpoint_input_model = pydantic.create_model(
f'{endpoint.strip("/")}_input_model',
data=(List[input_doc_model], []),
parameters=(Optional[Dict], None)
)
endpoint_output_model = pydantic.create_model(
f'{endpoint.strip("/")}_output_model',
data=(List[output_doc_model], []),
parameters=(Optional[Dict], None)
)
add_route(endpoint,
input_model=endpoint_input_model,
output_model=endpoint_output_model,
input_doc_list_model=input_doc_model,
output_doc_list_model=output_doc_model)
from jina.serve.runtimes.gateway.health_model import JinaHealthModel
@app.get(
path='/',
summary='Get the health of Jina Executor service',
response_model=JinaHealthModel,
)
async def _executor_health():
"""
Get the health of this Gateway service.
.. # noqa: DAR201
"""
return {}
return app
|
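As a hedged aside (not part of the dataset rows above): the `get_fastapi_app` snippets rely on `pydantic.create_model` to build one request/response model per endpoint. The sketch below shows that pattern in isolation; the `TextDoc` class and the `encode` endpoint name are illustrative assumptions, not taken from the snippets.
from typing import Dict, List, Optional

import pydantic


class TextDoc(pydantic.BaseModel):
    # Stand-in for a document model; assumed for illustration only.
    text: str


# Build a request model with a `data` list of docs and an optional `parameters`
# dict, mirroring the create_model calls in the snippets above.
encode_input_model = pydantic.create_model(
    'encode_input_model',
    data=(List[TextDoc], []),
    parameters=(Optional[Dict], None),
)

body = encode_input_model(data=[{'text': 'hello'}], parameters={'limit': 3})
print(body.data[0].text)  # -> hello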
# mypy: allow-untyped-defs
import torch
def is_available():
r"""Return whether PyTorch is built with MKL support."""
return torch._C.has_mkl
VERBOSE_OFF = 0
VERBOSE_ON = 1
class verbose:
"""
On-demand oneMKL verbosing functionality.
To make it easier to debug performance issues, oneMKL can dump verbose
messages containing execution information like duration while executing
the kernel. The verbosing functionality can be invoked via an environment
variable named `MKL_VERBOSE`. However, this methodology dumps messages in
all steps. Those are a large amount of verbose messages. Moreover, for
investigating the performance issues, generally taking verbose messages
for one single iteration is enough. This on-demand verbosing functionality
makes it possible to control scope for verbose message dumping. In the
following example, verbose messages will be dumped out for the second
inference only.
.. highlight:: python
.. code-block:: python
import torch
model(data)
with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
model(data)
Args:
level: Verbose level
- ``VERBOSE_OFF``: Disable verbosing
- ``VERBOSE_ON``: Enable verbosing
"""
def __init__(self, enable):
self.enable = enable
def __enter__(self):
if self.enable == VERBOSE_OFF:
return
st = torch._C._verbose.mkl_set_verbose(self.enable)
assert st, (
"Failed to set MKL into verbose mode. Please consider to disable this verbose scope."
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
return False
|
# mypy: allow-untyped-defs
import torch
def is_available():
r"""Return whether PyTorch is built with MKL support."""
return torch._C.has_mkl
VERBOSE_OFF = 0
VERBOSE_ON = 1
class verbose:
"""
On-demand oneMKL verbosing functionality.
To make it easier to debug performance issues, oneMKL can dump verbose
messages containing execution information like duration while executing
the kernel. The verbosing functionality can be invoked via an environment
variable named `MKL_VERBOSE`. However, this methodology dumps messages in
all steps. Those are a large amount of verbose messages. Moreover, for
investigating the performance issues, generally taking verbose messages
for one single iteration is enough. This on-demand verbosing functionality
makes it possible to control scope for verbose message dumping. In the
following example, verbose messages will be dumped out for the second
inference only.
.. highlight:: python
.. code-block:: python
import torch
model(data)
with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
model(data)
Args:
level: Verbose level
- ``VERBOSE_OFF``: Disable verbosing
- ``VERBOSE_ON``: Enable verbosing
"""
def __init__(self, enable):
self.enable = enable
def __enter__(self):
if self.enable == VERBOSE_OFF:
return
st = torch._C._verbose.mkl_set_verbose(self.enable)
assert (
st
), "Failed to set MKL into verbose mode. Please consider to disable this verbose scope."
return self
def __exit__(self, exc_type, exc_val, exc_tb):
torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
return False
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from laser_encoder import LaserEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(text='it is a good day! the dog sits on the floor.')
yield doc
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=LaserEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=LaserEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
_base_ = '../common/lsj-200e_coco-detection.py'
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
relu_before_extra_convs=True),
bbox_head=dict(
type='CenterNetUpdateHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='GaussianFocalLoss',
pos_weight=0.25,
neg_weight=0.75,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
paramwise_cfg=dict(norm_decay_mult=0.))
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00025,
by_epoch=False,
begin=0,
end=4000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../common/lsj_200e_coco_detection.py'
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
relu_before_extra_convs=True),
bbox_head=dict(
type='CenterNetUpdateHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='GaussianFocalLoss',
pos_weight=0.25,
neg_weight=0.75,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
paramwise_cfg=dict(norm_decay_mult=0.))
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00025,
by_epoch=False,
begin=0,
end=4000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from jina.parsers.helper import add_arg_group
def mixin_head_parser(parser):
"""Mixing in arguments required by head pods and runtimes into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Head')
gp.add_argument(
'--compression',
choices=['NoCompression', 'Deflate', 'Gzip'],
help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, '
'check https://grpc.github.io/grpc/python/grpc.html#compression.',
)
gp.add_argument(
'--uses-before-address',
type=str,
help='The address of the uses-before runtime',
)
gp.add_argument(
'--uses-after-address',
type=str,
        help='The address of the uses-after runtime',
)
gp.add_argument(
'--connection-list',
type=str,
help='dictionary JSON with a list of connections to configure',
)
gp.add_argument(
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head',
)
gp.add_argument(
'--timeout-send',
type=int,
default=None,
help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default',
)
|
import argparse
from jina.parsers.helper import add_arg_group
def mixin_head_parser(parser):
"""Mixing in arguments required by head pods and runtimes into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Head')
gp.add_argument(
'--compression',
choices=['NoCompression', 'Deflate', 'Gzip'],
help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, '
'check https://grpc.github.io/grpc/python/grpc.html#compression.',
)
gp.add_argument(
'--uses-before-address',
type=str,
help='The address of the uses-before runtime',
)
gp.add_argument(
'--uses-after-address',
type=str,
        help='The address of the uses-after runtime',
)
gp.add_argument(
'--connection-list',
type=str,
help='dictionary JSON with a list of connections to configure',
)
gp.add_argument(
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head',
)
gp.add_argument(
'--timeout-send',
type=int,
default=None,
help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default',
)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray import BaseDoc, DocList
def test_instance_and_equivalence():
class MyDoc(BaseDoc):
text: str
docs = DocList[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocList[MyDoc], DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, DocList[MyDoc])
def test_subclassing():
class MyDoc(BaseDoc):
text: str
class MyDocList(DocList[MyDoc]):
pass
docs = MyDocList([MyDoc(text='hello')])
assert issubclass(MyDocList, DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, MyDocList)
assert isinstance(docs, DocList[MyDoc])
assert issubclass(MyDoc, BaseDoc)
assert not issubclass(DocList[MyDoc], DocList[BaseDoc])
assert not issubclass(MyDocList, DocList[BaseDoc])
|
from docarray import BaseDoc, DocList
def test_instance_and_equivalence():
class MyDoc(BaseDoc):
text: str
docs = DocList[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocList[MyDoc], DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, DocList[MyDoc])
def test_subclassing():
class MyDoc(BaseDoc):
text: str
class MyDocList(DocList[MyDoc]):
pass
docs = MyDocList([MyDoc(text='hello')])
assert issubclass(MyDocList, DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, MyDocList)
assert isinstance(docs, DocList[MyDoc])
assert issubclass(MyDoc, BaseDoc)
assert not issubclass(DocList[MyDoc], DocList[BaseDoc])
assert not issubclass(MyDocList, DocList[BaseDoc])
|
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
import pytest
import torchaudio
from torchaudio.pipelines import (
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model setting
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(clip_grad=None)
|
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model setting
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
default_hooks = dict(optimizer=dict(type='OptimizerHook', grad_clip=None))
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
CSR Loss module that combines Reconstruction Loss and Sparse Multiple Negatives Ranking Loss.
This module computes the combined loss according to the formula:
L_CSR = L_recon + γ * L_MRL
where:
- L_recon = L(k) + L(4k)/8 + β*L_aux
- L_MRL is the Multiple Negatives Ranking Loss
"""
def __init__(
self,
model: SparseEncoder,
beta: float = 0.1,
gamma: float = 1.0,
scale: float = 20.0,
):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = ReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self,
sentence_features: Iterable[dict[str, torch.Tensor]],
labels: torch.Tensor = None,
) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sparse_embeddings = [output["sparse_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sparse_embeddings)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
"gamma": self.gamma,
"scale": self.scale,
}
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
CSR Loss module that combines Reconstruction Loss and Sparse Multiple Negatives Ranking Loss.
This module computes the combined loss according to the formula:
L_CSR = L_recon + γ * L_MRL
where:
- L_recon = L(k) + L(4k)/8 + β*L_aux
- L_MRL is the Multiple Negatives Ranking Loss
"""
def __init__(
self,
model: SparseEncoder,
beta: float = 1 / 32,
gamma: float = 0.1,
scale: float = 20.0,
):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = ReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self,
sentence_features: Iterable[dict[str, torch.Tensor]],
labels: torch.Tensor = None,
) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sparse_embeddings = [output["sparse_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sparse_embeddings)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
"gamma": self.gamma,
"scale": self.scale,
}
|
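A quick hedged illustration of the combination rule documented in the CSRLoss docstrings above, L_CSR = L_recon + γ * L_MRL, using plain tensors as stand-ins for the two component losses (the numeric values are made up for the example):
import torch

gamma = 0.1  # ranking-loss weight; matches the default in the second snippet
recon_loss = torch.tensor(0.8)    # placeholder for the reconstruction loss
ranking_loss = torch.tensor(2.5)  # placeholder for the ranking (MNRL) loss

# Combine exactly as CSRLoss.forward does: L_CSR = L_recon + gamma * L_MRL
total_loss = recon_loss + gamma * ranking_loss
print(total_loss)  # tensor(1.0500)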
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import Embedding
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an Embedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray import Text
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[Embedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, Image, Text
# compose it
class MultiModalDoc(Document):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[Embedding] = None
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing.tensor.embedding import Embedding, Tensor
class Text(BaseDocument):
"""
base Document for Text handling
"""
text: str = ''
tensor: Optional[Tensor]
embedding: Optional[Embedding]
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 20])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 20])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
def test_load_bytes():
uri = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
audio_bytes = uri.load_bytes()
assert isinstance(audio_bytes, bytes)
assert isinstance(audio_bytes, AudioBytes)
assert len(audio_bytes) > 0
@pytest.mark.parametrize(
'file_type, file_source',
[
(AUDIO_MIMETYPE, AUDIO_FILES[0]),
(AUDIO_MIMETYPE, AUDIO_FILES[1]),
(AUDIO_MIMETYPE, REMOTE_AUDIO_FILE),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(VIDEO_MIMETYPE, os.path.join(TOYDATA_DIR, 'mov_bbb.mp4')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
(OBJ_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.glb')),
],
)
def test_file_validation(file_type, file_source):
if file_type != AudioUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(AudioUrl, file_source)
else:
parse_obj_as(AudioUrl, file_source)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
def test_load_bytes():
uri = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
audio_bytes = uri.load_bytes()
assert isinstance(audio_bytes, bytes)
assert isinstance(audio_bytes, AudioBytes)
assert len(audio_bytes) > 0
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseBinaryClassificationEvaluator,
SparseEncoder,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with two text columns and a class label column
# Using the Quora Duplicates dataset as an example
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseBinaryClassificationEvaluator,
SparseEncoder,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with two text columns and a class label column
# Using the Quora Duplicates dataset as an example
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
|
import torch
import os
import clip
import numpy as np
from glob import glob
from PIL import Image
from jina import Flow, Document
from ...clip_image import CLIPImageEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_clip_data():
docs = []
for file in glob(os.path.join(cur_dir, 'test_data', '*')):
pil_image = Image.open(file)
nd_image = np.array(pil_image)
docs.append(Document(id=file, blob=nd_image))
with Flow().add(uses=CLIPImageEncoder) as f:
results = f.post(on='/test', inputs=docs, return_results=True)
os.path.join(cur_dir, 'test_data', 'banana2.png')
image_name_to_ndarray = {}
for d in results[0].docs:
image_name_to_ndarray[d.id] = d.embedding
def dist(a, b):
nonlocal image_name_to_ndarray
a_embedding = image_name_to_ndarray[os.path.join(cur_dir, 'test_data', f'{a}.png')]
b_embedding = image_name_to_ndarray[os.path.join(cur_dir, 'test_data', f'{b}.png')]
return np.linalg.norm(a_embedding - b_embedding)
# assert semantic meaning is captured in the encoding
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
# assert same results like calculating it manually
model, preprocess = clip.load('ViT-B/32', device='cpu')
assert len(image_name_to_ndarray) == 5
for file, actual_embedding in image_name_to_ndarray.items():
image = preprocess(Image.open(file)).unsqueeze(0).to('cpu')
with torch.no_grad():
expected_embedding = model.encode_image(image).numpy()[0]
np.testing.assert_almost_equal(actual_embedding, expected_embedding, 5)
|
import torch
import os
import clip
import numpy as np
from glob import glob
from PIL import Image
from jina import Flow, Document
from jinahub.encoder.clip_image import CLIPImageEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_clip_data():
docs = []
for file in glob(os.path.join(cur_dir, 'test_data', '*')):
pil_image = Image.open(file)
nd_image = np.array(pil_image)
docs.append(Document(id=file, blob=nd_image))
with Flow().add(uses=CLIPImageEncoder) as f:
results = f.post(on='/test', inputs=docs, return_results=True)
os.path.join(cur_dir, 'test_data', 'banana2.png')
image_name_to_ndarray = {}
for d in results[0].docs:
image_name_to_ndarray[d.id] = d.embedding
def dist(a, b):
nonlocal image_name_to_ndarray
a_embedding = image_name_to_ndarray[os.path.join(cur_dir, 'test_data', f'{a}.png')]
b_embedding = image_name_to_ndarray[os.path.join(cur_dir, 'test_data', f'{b}.png')]
return np.linalg.norm(a_embedding - b_embedding)
# assert semantic meaning is captured in the encoding
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
# assert same results like calculating it manually
model, preprocess = clip.load('ViT-B/32', device='cpu')
assert len(image_name_to_ndarray) == 5
for file, actual_embedding in image_name_to_ndarray.items():
image = preprocess(Image.open(file)).unsqueeze(0).to('cpu')
with torch.no_grad():
expected_embedding = model.encode_image(image).numpy()[0]
np.testing.assert_almost_equal(actual_embedding, expected_embedding, 5)
|
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.exp_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename="{epoch}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
gradient_clip_val=10.0,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.modality == "audiovisual":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--modality",
type=str,
help="Modality",
choices=["audio", "video", "audiovisual"],
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--root-dir",
type=str,
help="Root directory to LRS3 audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
default="./exp",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--exp-name",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint",
default=None,
type=str,
help="Path to the checkpoint to resume from",
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to use debug level for logging",
)
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
|
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.exp_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename="{epoch}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
gradient_clip_val=10.0,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.modality == "audiovisual":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--modality",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--root-dir",
type=str,
help="Root directory to LRS3 audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
default="./exp",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--exp-name",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint",
default=None,
type=str,
help="Path to the checkpoint to resume from",
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to use debug level for logging",
)
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2023 Imperial College London (Pingchuan Ma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
import torchaudio
import torchvision
class AVSRDataLoader:
def __init__(self, modality, detector="retinaface", resize=None):
self.modality = modality
if modality == "video":
if detector == "retinaface":
from detectors.retinaface.detector import LandmarksDetector
from detectors.retinaface.video_process import VideoProcess
self.landmarks_detector = LandmarksDetector(device="cuda:0")
self.video_process = VideoProcess(resize=resize)
if detector == "mediapipe":
from detectors.mediapipe.detector import LandmarksDetector
from detectors.mediapipe.video_process import VideoProcess
self.landmarks_detector = LandmarksDetector()
self.video_process = VideoProcess(resize=resize)
def load_data(self, data_filename, transform=True):
if self.modality == "audio":
audio, sample_rate = self.load_audio(data_filename)
audio = self.audio_process(audio, sample_rate)
return audio
if self.modality == "video":
landmarks = self.landmarks_detector(data_filename)
video = self.load_video(data_filename)
video = self.video_process(video, landmarks)
video = torch.tensor(video)
return video
def load_audio(self, data_filename):
waveform, sample_rate = torchaudio.load(data_filename, normalize=True)
return waveform, sample_rate
def load_video(self, data_filename):
return torchvision.io.read_video(data_filename, pts_unit="sec")[0].numpy()
def audio_process(self, waveform, sample_rate, target_sample_rate=16000):
if sample_rate != target_sample_rate:
waveform = torchaudio.functional.resample(waveform, sample_rate, target_sample_rate)
waveform = torch.mean(waveform, dim=0, keepdim=True)
return waveform
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2023 Imperial College London (Pingchuan Ma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
import torchaudio
import torchvision
class AVSRDataLoader:
def __init__(self, modality, detector="retinaface", resize=None):
self.modality = modality
if modality == "video":
if detector == "retinaface":
from detectors.retinaface.detector import LandmarksDetector
from detectors.retinaface.video_process import VideoProcess
self.landmarks_detector = LandmarksDetector(device="cuda:0")
self.video_process = VideoProcess(resize=resize)
def load_data(self, data_filename, transform=True):
if self.modality == "audio":
audio, sample_rate = self.load_audio(data_filename)
audio = self.audio_process(audio, sample_rate)
return audio
if self.modality == "video":
landmarks = self.landmarks_detector(data_filename)
video = self.load_video(data_filename)
video = self.video_process(video, landmarks)
video = torch.tensor(video)
return video
def load_audio(self, data_filename):
waveform, sample_rate = torchaudio.load(data_filename, normalize=True)
return waveform, sample_rate
def load_video(self, data_filename):
return torchvision.io.read_video(data_filename, pts_unit="sec")[0].numpy()
def audio_process(self, waveform, sample_rate, target_sample_rate=16000):
if sample_rate != target_sample_rate:
waveform = torchaudio.functional.resample(waveform, sample_rate, target_sample_rate)
waveform = torch.mean(waveform, dim=0, keepdim=True)
return waveform
|
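As a hedged, standalone sketch of the `audio_process` step above (a synthetic stereo waveform and an assumed 44.1 kHz source rate, neither taken from the snippets): resample to 16 kHz, then average the channels down to mono.
import torch
import torchaudio

waveform = torch.randn(2, 44100)  # one second of synthetic stereo audio at 44.1 kHz
# Resample to the 16 kHz target rate used by AVSRDataLoader.audio_process
resampled = torchaudio.functional.resample(waveform, orig_freq=44100, new_freq=16000)
# Mix down to a single channel, keeping the channel dimension
mono = torch.mean(resampled, dim=0, keepdim=True)
print(mono.shape)  # torch.Size([1, 16000])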