input | output
---|---|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDocument):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation of 3D data that consists of vertex and face information.
Vertices are points in 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain a Mesh3DUrl (`Mesh3D.url`), an AnyTensor of
vertices (`Mesh3D.vertices`), an AnyTensor of faces (`Mesh3D.faces`) and an
AnyEmbedding (`Mesh3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.vertices, mesh.faces = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.vertices)
You can extend this Document:
.. code-block:: python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[str]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.vertices, mesh.faces = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.vertices)
mesh.name = 'my first mesh'
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Mesh3D, Text
# compose it
class MultiModalDoc(BaseDocument):
mesh: Mesh3D
text: Text
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.mesh.vertices, mmdoc.mesh.faces = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes = mmdoc.mesh.url.load_bytes()
"""
url: Optional[Mesh3DUrl]
vertices: Optional[AnyTensor]
faces: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
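# A minimal, hypothetical sketch of what the `validate` classmethod above
# enables: a bare URL string is coerced into a Mesh3D when the document is
# used as a field of another document (assuming pydantic routes field
# validation through `Mesh3D.validate`).
class Scene(BaseDocument):
    mesh: Mesh3D


scene = Scene(mesh='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
assert scene.mesh.url is not None  # the string became Mesh3D(url=...)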
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDocument):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation of 3D data that consists of vertex and face information.
Vertices are points in 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain a Mesh3DUrl (`Mesh3D.url`), an AnyTensor of
vertices (`Mesh3D.vertices`), an AnyTensor of faces (`Mesh3D.faces`) and an
AnyEmbedding (`Mesh3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.vertices, mesh.faces = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.vertices)
You can extend this Document:
.. code-block:: python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[str]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.vertices, mesh.faces = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.vertices)
mesh.name = 'my first mesh'
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Mesh3D, Text
# compose it
class MultiModalDoc(BaseDocument):
mesh: Mesh3D
text: Text
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.mesh.vertices, mmdoc.mesh.faces = mmdoc.mesh.url.load()
"""
url: Optional[Mesh3DUrl]
vertices: Optional[AnyTensor]
faces: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2,<0.3.6',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2,<0.3.6',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2,<0.3.6',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES and params.get('device_plugins'):
yaml = _get_yaml(template, params)
yaml = _get_deployment_with_device_plugins(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = value
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
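# A minimal, hypothetical sketch of the `{placeholder}` substitution that
# `_get_yaml` performs, applied to an in-memory template string instead of a
# template file on disk (the resource names below are made up).
import yaml

_template_text = """
apiVersion: v1
kind: ConfigMap
metadata:
  name: {name}-configmap
  namespace: {namespace}
"""
_params = {'name': 'executor0', 'namespace': 'demo'}
_content = _template_text
for _k, _v in _params.items():
    _content = _content.replace(f'{{{_k}}}', str(_v))
_resource = yaml.safe_load(_content)
assert _resource['metadata']['name'] == 'executor0-configmap'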
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES and params.get('device_plugins'):
yaml = _get_yaml(template, params)
yaml = _get_deployment_with_device_plugins(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = value
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
# When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
def get_device(t):
# Note: after updating to JAX 0.4.33, the device can be accessed directly via the `t.device` attribute.
return list(t.devices())[0]
platform = jax.default_backend()
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
# When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
# When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
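# A minimal, hypothetical sketch of using the same backend-agnostic device
# scope outside of a test (in public Keras 3 this scope is exposed as
# `keras.device`).
with backend.device("cpu:0"):
    _x = backend.numpy.ones((2, 2))  # allocated on the CPU regardless of the default device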
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
# When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
from jax.lib import xla_bridge
def get_device(t):
# Note: after updating to JAX 0.4.33, the device can be accessed directly via the `t.device` attribute.
return list(t.devices())[0]
platform = xla_bridge.get_backend().platform
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
# When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
# When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
|
"""Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""
Patentsview reader.
Read patent abstract.
"""
def __init__(self) -> None:
"""Initialize with request body."""
self.json = {"q": {"patent_id": None}, "f": ["patent_abstract"]}
def load_data(self, patent_number: List[str]) -> List[Document]:
"""
Load patent abstracts given a list of patent numbers.
Args:
patent_number: List[str]: List of patent numbers, e.g., 8848839.
Returns:
List[Document]: A list of Document objects, each including the abstract for a patent.
"""
if not patent_number:
raise ValueError("Please input patent number")
self.json["q"]["patent_id"] = patent_number
response = requests.post(BASE_URL, json=self.json)
if response.status_code == 200:
data = response.json()
patents = data.get("patents", [])
results = []
for patent in patents:
results.append(Document(text=patent["patent_abstract"]))
else:
raise Exception(f"Request failed with status code: {response.status_code}")
return results
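# A minimal, hypothetical usage sketch (requires network access to the
# Patentsview API; the patent number is the example from the docstring above).
reader = PatentsviewReader()
documents = reader.load_data(patent_number=["8848839"])
for doc in documents:
    print(doc.text[:200])  # first 200 characters of the patent abstract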
|
"""Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""Patentsview reader.
Read patent abstract.
"""
def __init__(self) -> None:
"""Initialize with request body."""
self.json = {"q": {"patent_id": None}, "f": ["patent_abstract"]}
def load_data(self, patent_number: List[str]) -> List[Document]:
"""
Load patent abstracts given a list of patent numbers.
Args:
patent_number: List[str]: List of patent numbers, e.g., 8848839.
Returns:
List[Document]: A list of Document objects, each including the abstract for a patent.
"""
if not patent_number:
raise ValueError("Please input patent number")
self.json["q"]["patent_id"] = patent_number
response = requests.post(BASE_URL, json=self.json)
if response.status_code == 200:
data = response.json()
patents = data.get("patents", [])
results = []
for patent in patents:
results.append(Document(text=patent["patent_abstract"]))
else:
raise Exception(f"Request failed with status code: {response.status_code}")
return results
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu, all_reduce_params)
from .utils import (get_dist_info, init_dist, init_local_group, get_backend,
get_world_size, get_rank, get_local_size, get_local_rank,
is_main_process, master_only, barrier, get_local_group,
is_distributed, get_default_group, get_data_device,
get_comm_device, cast_data_device, infer_launcher)
__all__ = [
'all_gather_object', 'all_reduce', 'all_gather', 'all_reduce_dict',
'collect_results', 'collect_results_cpu', 'collect_results_gpu', 'gather',
'broadcast', 'gather_object', 'sync_random_seed', 'broadcast_object_list',
'get_dist_info', 'init_dist', 'init_local_group', 'get_backend',
'get_world_size', 'get_rank', 'get_local_size', 'get_local_group',
'get_local_rank', 'is_main_process', 'master_only', 'barrier',
'is_distributed', 'get_default_group', 'all_reduce_params',
'get_data_device', 'get_comm_device', 'cast_data_device', 'infer_launcher'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu, all_reduce_params)
from .utils import (get_dist_info, init_dist, init_local_group, get_backend,
get_world_size, get_rank, get_local_size, get_local_rank,
is_main_process, master_only, barrier, get_local_group,
is_distributed, get_default_group, get_data_device,
get_comm_device, cast_data_device)
__all__ = [
'all_gather_object', 'all_reduce', 'all_gather', 'all_reduce_dict',
'collect_results', 'collect_results_cpu', 'collect_results_gpu', 'gather',
'broadcast', 'gather_object', 'sync_random_seed', 'broadcast_object_list',
'get_dist_info', 'init_dist', 'init_local_group', 'get_backend',
'get_world_size', 'get_rank', 'get_local_size', 'get_local_group',
'get_local_rank', 'is_main_process', 'master_only', 'barrier',
'is_distributed', 'get_default_group', 'all_reduce_params',
'get_data_device', 'get_comm_device', 'cast_data_device'
]
|
import logging
import sys
import traceback
from datasets import Dataset, load_dataset
from peft import LoraConfig, TaskType
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerModelCardData,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
)
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.losses import CachedMultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
model_name_only = model_name.split("/")[-1]
# 1. Load a model to finetune with 2. (Optional) model card data
model = SentenceTransformer(
model_name,
model_card_data=SentenceTransformerModelCardData(
language="en",
license="apache-2.0",
model_name=f"{model_name_only} adapter finetuned on GooAQ pairs",
),
)
# Create a LoRA adapter for the model
peft_config = LoraConfig(
task_type=TaskType.FEATURE_EXTRACTION,
inference_mode=False,
r=64,
lora_alpha=128,
lora_dropout=0.1,
)
model.add_adapter(peft_config)
# 3. Load a dataset to finetune on
dataset = load_dataset("sentence-transformers/gooaq", split="train")
dataset_dict = dataset.train_test_split(test_size=10_000, seed=12)
train_dataset: Dataset = dataset_dict["train"].select(range(1_000_000))
eval_dataset: Dataset = dataset_dict["test"]
# 4. Define a loss function
loss = CachedMultipleNegativesRankingLoss(model, mini_batch_size=32)
# 5. (Optional) Specify training arguments
run_name = f"{model_name_only}-gooaq-peft"
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=1024,
per_device_eval_batch_size=1024,
learning_rate=2e-5,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=25,
logging_first_step=True,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. (Optional) Create an evaluator & evaluate the base model
# The full corpus, but only the evaluation queries
dev_evaluator = NanoBEIREvaluator()
dev_evaluator(model)
# 7. Create a trainer & train
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
trainer.train()
# (Optional) Evaluate the trained model on the evaluator after training
dev_evaluator(model)
# 8. Save the trained model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
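# A minimal, hypothetical follow-up sketch: using the freshly trained model
# for retrieval-style scoring (assumes `model.similarity` is available, i.e.
# sentence-transformers >= 3.x; the query/document strings are made up).
query_embeddings = model.encode(["how do i fine-tune an embedding model?"])
doc_embeddings = model.encode(
    [
        "You can attach a LoRA adapter and train with a ranking loss.",
        "Unrelated text about cooking rice.",
    ]
)
similarity_scores = model.similarity(query_embeddings, doc_embeddings)  # shape (1, 2)
print(similarity_scores)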
|
import logging
import sys
import traceback
from datasets import Dataset, load_dataset
from peft import LoraConfig, TaskType
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerModelCardData,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
)
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.losses import MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
model_name_only = model_name.split("/")[-1]
# 1. Load a model to finetune with 2. (Optional) model card data
model = SentenceTransformer(
model_name,
model_card_data=SentenceTransformerModelCardData(
language="en",
license="apache-2.0",
model_name=f"{model_name_only} adapter finetuned on GooAQ pairs",
),
)
# Create a LoRA adapter for the model
peft_config = LoraConfig(
task_type=TaskType.FEATURE_EXTRACTION,
inference_mode=False,
r=64,
lora_alpha=128,
lora_dropout=0.1,
)
model.add_adapter(peft_config)
# 3. Load a dataset to finetune on
dataset = load_dataset("sentence-transformers/gooaq", split="train")
dataset_dict = dataset.train_test_split(test_size=10_000, seed=12)
train_dataset: Dataset = dataset_dict["train"].select(range(1_000_000))
eval_dataset: Dataset = dataset_dict["test"]
# 4. Define a loss function
loss = MultipleNegativesRankingLoss(model)
# 5. (Optional) Specify training arguments
run_name = f"{model_name_only}-gooaq-peft"
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=1024,
per_device_eval_batch_size=1024,
learning_rate=2e-5,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=25,
logging_first_step=True,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. (Optional) Create an evaluator & evaluate the base model
# The full corpus, but only the evaluation queries
dev_evaluator = NanoBEIREvaluator(batch_size=1024)
dev_evaluator(model)
# 7. Create a trainer & train
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
trainer.train()
# (Optional) Evaluate the trained model on the evaluator after training
dev_evaluator(model)
# 8. Save the trained model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
from typing import TYPE_CHECKING
import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from pytest_mock import MockerFixture
from langchain_community.chat_message_histories import ZepChatMessageHistory
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_chat(mocker: MockerFixture) -> ZepChatMessageHistory:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
zep_chat: ZepChatMessageHistory = ZepChatMessageHistory(
"test_session", "http://localhost:8000"
)
zep_chat.zep_client = mock_zep_client
return zep_chat
@pytest.mark.requires("zep_python")
def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
from zep_python import Memory, Message, Summary
mock_memory: Memory = Memory(
summary=Summary(
content="summary",
),
messages=[
Message(content="message", role="ai", metadata={"key": "value"}),
Message(content="message2", role="human", metadata={"key2": "value2"}),
],
)
zep_chat.zep_client.memory.get_memory.return_value = mock_memory
result = zep_chat.messages
assert len(result) == 3
assert isinstance(result[0], SystemMessage) # summary
assert isinstance(result[1], AIMessage)
assert isinstance(result[2], HumanMessage)
@pytest.mark.requires("zep_python")
def test_add_user_message(
mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) -> None:
zep_chat.add_user_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once()
@pytest.mark.requires("zep_python")
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_ai_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once()
@pytest.mark.requires("zep_python")
def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_message(AIMessage(content="test message"))
zep_chat.zep_client.memory.add_memory.assert_called_once()
@pytest.mark.requires("zep_python")
def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.search("test query")
zep_chat.zep_client.memory.search_memory.assert_called_once_with(
"test_session", mocker.ANY, limit=None
)
@pytest.mark.requires("zep_python")
def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.clear()
zep_chat.zep_client.memory.delete_memory.assert_called_once_with("test_session")
|
from typing import TYPE_CHECKING
import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from pytest_mock import MockerFixture
from langchain_community.chat_message_histories import ZepChatMessageHistory
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_chat(mocker: MockerFixture) -> ZepChatMessageHistory:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
zep_chat: ZepChatMessageHistory = ZepChatMessageHistory(
"test_session", "http://localhost:8000"
)
zep_chat.zep_client = mock_zep_client
return zep_chat
@pytest.mark.requires("zep_python")
def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
from zep_python import Memory, Message, Summary
mock_memory: Memory = Memory(
summary=Summary(
content="summary",
),
messages=[
Message(content="message", role="ai", metadata={"key": "value"}),
Message(content="message2", role="human", metadata={"key2": "value2"}),
],
)
zep_chat.zep_client.memory.get_memory.return_value = mock_memory # type: ignore
result = zep_chat.messages
assert len(result) == 3
assert isinstance(result[0], SystemMessage) # summary
assert isinstance(result[1], AIMessage)
assert isinstance(result[2], HumanMessage)
@pytest.mark.requires("zep_python")
def test_add_user_message(
mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) -> None:
zep_chat.add_user_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_ai_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_message(AIMessage(content="test message"))
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.search("test query")
zep_chat.zep_client.memory.search_memory.assert_called_once_with( # type: ignore
"test_session", mocker.ANY, limit=None
)
@pytest.mark.requires("zep_python")
def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.clear()
zep_chat.zep_client.memory.delete_memory.assert_called_once_with( # type: ignore
"test_session"
)
|
import io
import warnings
from abc import ABC
from typing import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
:param format: the image format used to store the image, e.g. 'PNG', 'JPEG', ...
:return: bytes
"""
if TYPE_CHECKING:
from PIL import Image as PILImage
else:
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
if TYPE_CHECKING:
from PIL import Image as PILImage
else:
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
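# A minimal, hypothetical sketch of what `to_bytes` does under the hood:
# a (H, W, 3) uint8 array is wrapped by PIL and serialized to PNG bytes.
import numpy as np
from PIL import Image as PILImage

_arr = np.zeros((16, 16, 3), dtype=np.uint8)  # a 16x16 black RGB image
_pil_image = PILImage.fromarray(_arr, mode='RGB')
with io.BytesIO() as _buffer:
    _pil_image.save(_buffer, format='PNG')
    _png_bytes = _buffer.getvalue()
assert _png_bytes[:8] == b'\x89PNG\r\n\x1a\n'  # PNG file signature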
|
import io
import warnings
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
:param format: the image format used to store the image, e.g. 'PNG', 'JPEG', ...
:return: bytes
"""
from PIL import Image
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = Image.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
from PIL import Image
np_array = self.get_comp_backend().to_numpy(self)
img = Image.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import early_stopping, log_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / 'VERSION.txt'
if _version_path.is_file():
__version__ = _version_path.read_text(encoding='utf-8').strip()
__all__ = ['Dataset', 'Booster', 'CVBooster', 'Sequence',
'register_logger',
'train', 'cv',
'LGBMModel', 'LGBMRegressor', 'LGBMClassifier', 'LGBMRanker',
'DaskLGBMRegressor', 'DaskLGBMClassifier', 'DaskLGBMRanker',
'log_evaluation', 'record_evaluation', 'reset_parameter', 'early_stopping',
'plot_importance', 'plot_split_value_histogram', 'plot_metric', 'plot_tree', 'create_tree_digraph']
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import early_stopping, log_evaluation, print_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / 'VERSION.txt'
if _version_path.is_file():
__version__ = _version_path.read_text(encoding='utf-8').strip()
__all__ = ['Dataset', 'Booster', 'CVBooster', 'Sequence',
'register_logger',
'train', 'cv',
'LGBMModel', 'LGBMRegressor', 'LGBMClassifier', 'LGBMRanker',
'DaskLGBMRegressor', 'DaskLGBMClassifier', 'DaskLGBMRanker',
'log_evaluation', 'print_evaluation', 'record_evaluation', 'reset_parameter', 'early_stopping',
'plot_importance', 'plot_split_value_histogram', 'plot_metric', 'plot_tree', 'create_tree_digraph']
|
from typing import Callable, Optional
from .. import Features, NamedSplit, Split
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
split: NamedSplit = Split.TRAIN,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
split=split,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.builder.config.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.builder.config.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
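# A minimal, hypothetical usage sketch: `GeneratorDatasetInputStream` is what
# backs `datasets.Dataset.from_generator`; used directly it looks roughly like
# the commented lines below (kept as comments to avoid running code at import
# time inside the package).
#
#     def gen():
#         for i in range(3):
#             yield {"id": i, "text": f"example {i}"}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()
#     print(ds[0])  # {'id': 0, 'text': 'example 0'}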
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(key) if isinstance(key, list) else key for key in keys]
counter = Counter(keys)
duplicate_keys = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
def construct_mapping(self, node, deep=False):
mapping = super().construct_mapping(node, deep=deep)
self._check_no_duplicates_on_constructed_node(node)
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
full_content = [line for line in readme_content.splitlines()]
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
sep_idx = full_content[1:].index("---") + 1
yamlblock = "\n".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(full_content)
class DatasetMetadata(dict):
# class attributes
_FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def from_readme(cls, path: Path) -> "DatasetMetadata":
"""Loads and validates the dataset metadat from its dataset card (README.md)
Args:
path (:obj:`Path`): Path to the dataset card (its README.md file)
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
with open(path, encoding="utf-8") as readme_file:
yaml_string, _ = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(yaml_string)
else:
return cls()
def to_readme(self, path: Path):
if path.exists():
with open(path, encoding="utf-8") as readme_file:
readme_content = readme_file.read()
else:
readme_content = None
updated_readme_content = self._to_readme(readme_content)
with open(path, "w", encoding="utf-8") as readme_file:
readme_file.write(updated_readme_content)
def _to_readme(self, readme_content: Optional[str] = None) -> str:
if readme_content is not None:
_, content = _split_yaml_from_readme(readme_content)
full_content = "---\n" + self.to_yaml_string() + "---\n" + content
else:
full_content = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def from_yaml_string(cls, string: str) -> "DatasetMetadata":
"""Loads and validates the dataset metadata from a YAML string
Args:
string (:obj:`str`): The YAML string
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or dict()
# Convert the YAML keys to DatasetMetadata fields
metadata_dict = {
(key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**metadata_dict)
def to_yaml_string(self) -> str:
return yaml.safe_dump(
{
(key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},
sort_keys=False,
allow_unicode=True,
encoding="utf-8",
).decode("utf-8")
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
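# A minimal, hypothetical sketch of the YAML front-matter round trip
# implemented above (kept as comments so the module stays side-effect free):
#
#     readme = "---\nlicense: apache-2.0\ntrain-eval-index:\n- config: default\n---\n# My dataset\n"
#     yaml_block, body = _split_yaml_from_readme(readme)
#     metadata = DatasetMetadata.from_yaml_string(yaml_block)
#     assert "train_eval_index" in metadata  # dashed key mapped to the underscored field
#     assert body.splitlines()[0] == "# My dataset"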
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(key) if isinstance(key, list) else key for key in keys]
counter = Counter(keys)
duplicate_keys = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
def construct_mapping(self, node, deep=False):
mapping = super().construct_mapping(node, deep=deep)
self._check_no_duplicates_on_constructed_node(node)
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
full_content = [line for line in readme_content.splitlines()]
if full_content[0] == "---" and "---" in full_content[1:]:
sep_idx = full_content[1:].index("---") + 1
yamlblock = "\n".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(full_content)
class DatasetMetadata(dict):
# class attributes
_FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def from_readme(cls, path: Path) -> "DatasetMetadata":
"""Loads and validates the dataset metadat from its dataset card (README.md)
Args:
path (:obj:`Path`): Path to the dataset card (its README.md file)
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
with open(path, encoding="utf-8") as readme_file:
yaml_string, _ = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(yaml_string)
else:
return cls()
def to_readme(self, path: Path):
if path.exists():
with open(path, encoding="utf-8") as readme_file:
readme_content = readme_file.read()
else:
readme_content = None
updated_readme_content = self._to_readme(readme_content)
with open(path, "w", encoding="utf-8") as readme_file:
readme_file.write(updated_readme_content)
def _to_readme(self, readme_content: Optional[str] = None) -> str:
if readme_content is not None:
_, content = _split_yaml_from_readme(readme_content)
full_content = "---\n" + self.to_yaml_string() + "---\n" + content
else:
full_content = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def from_yaml_string(cls, string: str) -> "DatasetMetadata":
"""Loads and validates the dataset metadata from a YAML string
Args:
string (:obj:`str`): The YAML string
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or dict()
# Convert the YAML keys to DatasetMetadata fields
metadata_dict = {
(key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**metadata_dict)
def to_yaml_string(self) -> str:
return yaml.safe_dump(
{
(key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},
sort_keys=False,
allow_unicode=True,
encoding="utf-8",
).decode("utf-8")
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
|
import os
import yaml
from jina.serve.runtimes.gateway.gateway import BaseGateway, Gateway
from jina.jaml import JAML
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(self):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == BaseGateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
g = BaseGateway.load_config('Gateway', runtime_args={'port': [12345]})
g.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'Gateway'
assert (
type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== Gateway
)
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = BaseGateway.load_config('MyDummyGateway', runtime_args={'port': [12345]})
print(f' e {type(e)} => {e.__dict__}')
e.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert (
type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== MyDummyGateway
)
|
import os
import pytest
import yaml
from jina import Gateway
from jina.jaml import JAML
from jina.serve.executors import BaseExecutor
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(self):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == Gateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
from jina.serve.runtimes.gateway import BaseGateway
g = BaseGateway.load_config('BaseGateway', runtime_args={'port': [12345]})
g.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'BaseGateway'
assert (
type(Gateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== Gateway
)
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = Gateway.load_config('MyDummyGateway', runtime_args={'port': [12345]})
e.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert (
type(Gateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== MyDummyGateway
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import os
from PIL import Image
from jina import Executor
from jina.executors import BaseExecutor
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
def test_io_images_and_text(test_dir, doc_generator_img_text, expected_text):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_img_text
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
def test_io_text(doc_generator_text, expected_text):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_text
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 1
    # Check text
assert chunks[0].text == expected_text
assert chunks[0].mime_type == 'text/plain'
def test_io_img(test_dir, doc_generator_img):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_img
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from PIL import Image
from jina.executors import BaseExecutor
def test_io_images_and_text(test_dir, doc_generator_img_text, expected_text):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_img_text
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
def test_io_text(doc_generator_text, expected_text):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_text
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 1
    # Check text
assert chunks[0].text == expected_text
assert chunks[0].mime_type == 'text/plain'
def test_io_img(test_dir, doc_generator_img):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_img
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
|
import copy
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
n_components: Optional[int] = None
columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for key in columns.keys():
columns[key] = self._map_type(columns[key])
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
config = copy.deepcopy(config)
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
if config.data_path is None:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
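# Minimal illustrative sketch (separate from the class above; values are arbitrary):
# _init_storage accepts either an AnnliteConfig instance or a plain dict, which is
# converted with dataclass_from_dict before the AnnLite index is built.
if __name__ == "__main__":
    example_cfg = dataclass_from_dict(AnnliteConfig, {"n_dim": 128, "metric": "cosine"})
    # Unset optional fields keep their defaults, e.g. data_path stays None until
    # _init_storage fills in a temporary directory.
    print(example_cfg)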
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
n_components: Optional[int] = None
columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for key in columns.keys():
columns[key] = self._map_type(columns[key])
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
if config.data_path is None:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches, generator = peek_and_restore(generator)
self.generator = generator
self._first_batches = first_batches
self._output_signature = None
if not isinstance(first_batches[0], tuple):
raise ValueError(
"When passing a Python generator to a Keras model, "
"the generator must return a tuple, either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {first_batches[0]}"
)
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self.generator())
def get_jax_iterator(self):
return data_adapter_utils.get_jax_iterator(self.generator())
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_tf(x, spec):
if data_adapter_utils.is_scipy_sparse(x):
x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
elif data_adapter_utils.is_jax_sparse(x):
x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
if not spec.shape.is_compatible_with(x.shape):
raise TypeError(
f"Generator yielded an element of shape {x.shape} where "
f"an element of shape {spec.shape} was expected. Your "
"generator provides tensors with variable input "
"dimensions other than the batch size. Make sure that the "
"generator's first two batches do not have the same "
"dimension value wherever there is a variable input "
"dimension."
)
return x
def get_tf_iterator():
for batch in self.generator():
batch = tree.map_structure(
convert_to_tf, batch, self._output_signature
)
yield batch
if self._output_signature is None:
self._output_signature = data_adapter_utils.get_tensor_spec(
self._first_batches
)
ds = tf.data.Dataset.from_generator(
get_tf_iterator,
output_signature=self._output_signature,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self.generator())
@property
def num_batches(self):
return None
@property
def batch_size(self):
return None
def peek_and_restore(generator):
batches = list(
itertools.islice(
generator, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
)
)
return batches, lambda: itertools.chain(batches, generator)
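# Hedged usage sketch (not part of the adapter): a generator passed to
# GeneratorDataAdapter must yield tuples such as (inputs, targets). NumPy is assumed
# to be available; the shapes and values below are arbitrary.
if __name__ == "__main__":
    import numpy as np

    def _example_gen():
        while True:
            x = np.random.rand(8, 4).astype("float32")
            y = np.random.rand(8, 1).astype("float32")
            yield (x, y)

    adapter = GeneratorDataAdapter(_example_gen())
    it = adapter.get_numpy_iterator()
    first = next(iter(it))  # an (inputs, targets) tuple of NumPy arrays
    print([a.shape for a in first])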
|
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches, generator = peek_and_restore(generator)
self.generator = generator
self._first_batches = first_batches
self._output_signature = None
if not isinstance(first_batches[0], tuple):
raise ValueError(
"When passing a Python generator to a Keras model, "
"the generator must return a tuple, either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {first_batches[0]}"
)
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self.generator)
def get_jax_iterator(self):
return data_adapter_utils.get_jax_iterator(self.generator)
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_tf(x, spec):
if data_adapter_utils.is_scipy_sparse(x):
x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
elif data_adapter_utils.is_jax_sparse(x):
x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
if not spec.shape.is_compatible_with(x.shape):
raise TypeError(
f"Generator yielded an element of shape {x.shape} where "
f"an element of shape {spec.shape} was expected. Your "
"generator provides tensors with variable input "
"dimensions other than the batch size. Make sure that the "
"generator's first two batches do not have the same "
"dimension value wherever there is a variable input "
"dimension."
)
return x
def get_tf_iterator():
for batch in self.generator:
batch = tree.map_structure(
convert_to_tf, batch, self._output_signature
)
yield batch
if self._output_signature is None:
self._output_signature = data_adapter_utils.get_tensor_spec(
self._first_batches
)
ds = tf.data.Dataset.from_generator(
get_tf_iterator,
output_signature=self._output_signature,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self.generator)
@property
def num_batches(self):
return None
@property
def batch_size(self):
return None
def peek_and_restore(generator):
batches = list(
itertools.islice(
generator, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
)
)
return batches, itertools.chain(batches, generator)
|
"""langchain-core version information and utilities."""
VERSION = "0.3.56rc1"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.55"
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
class MSELoss(nn.Module):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
For an example, see the documentation on extending language models to new languages.
"""
def __init__(self, model):
"""
:param model: SentenceTransformerModel
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.model(sentence_features[0])['sentence_embedding']
return self.loss_fct(rep, labels)
|
from typing import Any, Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.couchbase import CouchbaseKVStore
class CouchbaseIndexStore(KVIndexStore):
"""Couchbase Index store."""
def __init__(
self,
couchbase_kvstore: CouchbaseKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""
Initialize a CouchbaseIndexStore.
Args:
couchbase_kvstore (CouchbaseKVStore): Couchbase key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the collection name
"""
super().__init__(
couchbase_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
@classmethod
def from_couchbase_client(
cls,
client: Any,
bucket_name: str,
scope_name: str,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
async_client: Optional[Any] = None,
) -> "CouchbaseIndexStore":
"""Initialize a CouchbaseIndexStore from a Couchbase client."""
couchbase_kvstore = CouchbaseKVStore.from_couchbase_client(
client=client,
bucket_name=bucket_name,
scope_name=scope_name,
async_client=async_client,
)
return cls(couchbase_kvstore, namespace, collection_suffix)
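# Hedged usage sketch (comments only): `cluster` stands for an already-connected
# Couchbase Cluster object, and the bucket/scope/namespace names are placeholders.
#
#     index_store = CouchbaseIndexStore.from_couchbase_client(
#         client=cluster,
#         bucket_name="my_bucket",
#         scope_name="my_scope",
#         namespace="llama_index",
#     )
#
# The kvstore is created internally via CouchbaseKVStore.from_couchbase_client, so no
# separate kvstore construction is needed.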
|
from typing import Any, Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.couchbase import CouchbaseKVStore
class CouchbaseIndexStore(KVIndexStore):
"""Couchbase Index store."""
def __init__(
self,
couchbase_kvstore: CouchbaseKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""
Initialize a CouchbaseIndexStore.
Args:
couchbase_kvstore (CouchbaseKVStore): Couchbase key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the collection name
"""
super().__init__(
couchbase_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
@classmethod
def from_couchbase_client(
cls,
client: Any,
bucket_name: str,
scope_name: str,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
async_client: Optional[Any] = None,
) -> "CouchbaseIndexStore":
"""Initialize a CouchbaseIndexStore from a Couchbase client."""
couchbase_kvstore = CouchbaseKVStore.from_couchbase_client(
client=client,
bucket_name=bucket_name,
scope_name=scope_name,
async_client=async_client,
)
return cls(couchbase_kvstore, namespace, collection_suffix)
|
from docarray.base_document.any_document import AnyDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.document import BaseDocument
from docarray.base_document.document_response import DocumentResponse
__all__ = ['AnyDocument', 'BaseDocument', 'BaseNode', 'DocumentResponse']
|
from docarray.base_document.any_document import AnyDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.document import BaseDocument
__all__ = ['AnyDocument', 'BaseDocument', 'BaseNode']
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.15.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.prompts import (
AQL_FIX_TEMPLATE,
AQL_GENERATION_TEMPLATE,
AQL_QA_TEMPLATE,
CYPHER_GENERATION_PROMPT,
CYPHER_GENERATION_TEMPLATE,
CYPHER_QA_PROMPT,
CYPHER_QA_TEMPLATE,
GRAPHDB_QA_TEMPLATE,
GRAPHDB_SPARQL_FIX_TEMPLATE,
GRAPHDB_SPARQL_GENERATION_TEMPLATE,
GREMLIN_GENERATION_TEMPLATE,
KUZU_EXTRA_INSTRUCTIONS,
KUZU_GENERATION_TEMPLATE,
NEBULAGRAPH_EXTRA_INSTRUCTIONS,
NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS,
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE,
NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE,
NGQL_GENERATION_TEMPLATE,
SPARQL_GENERATION_SELECT_TEMPLATE,
SPARQL_GENERATION_UPDATE_TEMPLATE,
SPARQL_INTENT_TEMPLATE,
SPARQL_QA_TEMPLATE,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"AQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"AQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"CYPHER_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"CYPHER_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"CYPHER_QA_PROMPT": "langchain_community.chains.graph_qa.prompts",
"CYPHER_GENERATION_PROMPT": "langchain_community.chains.graph_qa.prompts",
"GRAPHDB_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"GRAPHDB_SPARQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"GRAPHDB_SPARQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"GREMLIN_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"KUZU_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts",
"KUZU_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"NEBULAGRAPH_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts",
"NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS": (
"langchain_community.chains.graph_qa.prompts"
),
"NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE": (
"langchain_community.chains.graph_qa.prompts"
),
"NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE": (
"langchain_community.chains.graph_qa.prompts"
),
"NGQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_GENERATION_SELECT_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_GENERATION_UPDATE_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_INTENT_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AQL_FIX_TEMPLATE",
"AQL_GENERATION_TEMPLATE",
"AQL_QA_TEMPLATE",
"CYPHER_GENERATION_PROMPT",
"CYPHER_GENERATION_TEMPLATE",
"CYPHER_QA_PROMPT",
"CYPHER_QA_TEMPLATE",
"GRAPHDB_QA_TEMPLATE",
"GRAPHDB_SPARQL_FIX_TEMPLATE",
"GRAPHDB_SPARQL_GENERATION_TEMPLATE",
"GREMLIN_GENERATION_TEMPLATE",
"KUZU_EXTRA_INSTRUCTIONS",
"KUZU_GENERATION_TEMPLATE",
"NEBULAGRAPH_EXTRA_INSTRUCTIONS",
"NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS",
"NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE",
"NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE",
"NGQL_GENERATION_TEMPLATE",
"SPARQL_GENERATION_SELECT_TEMPLATE",
"SPARQL_GENERATION_UPDATE_TEMPLATE",
"SPARQL_INTENT_TEMPLATE",
"SPARQL_QA_TEMPLATE",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.prompts import (
AQL_FIX_TEMPLATE,
AQL_GENERATION_TEMPLATE,
AQL_QA_TEMPLATE,
CYPHER_GENERATION_PROMPT,
CYPHER_GENERATION_TEMPLATE,
CYPHER_QA_PROMPT,
CYPHER_QA_TEMPLATE,
GRAPHDB_QA_TEMPLATE,
GRAPHDB_SPARQL_FIX_TEMPLATE,
GRAPHDB_SPARQL_GENERATION_TEMPLATE,
GREMLIN_GENERATION_TEMPLATE,
KUZU_EXTRA_INSTRUCTIONS,
KUZU_GENERATION_TEMPLATE,
NEBULAGRAPH_EXTRA_INSTRUCTIONS,
NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS,
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE,
NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE,
NGQL_GENERATION_TEMPLATE,
SPARQL_GENERATION_SELECT_TEMPLATE,
SPARQL_GENERATION_UPDATE_TEMPLATE,
SPARQL_INTENT_TEMPLATE,
SPARQL_QA_TEMPLATE,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"AQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"AQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"CYPHER_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"CYPHER_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"CYPHER_QA_PROMPT": "langchain_community.chains.graph_qa.prompts",
"CYPHER_GENERATION_PROMPT": "langchain_community.chains.graph_qa.prompts",
"GRAPHDB_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"GRAPHDB_SPARQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"GRAPHDB_SPARQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"GREMLIN_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"KUZU_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts",
"KUZU_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"NEBULAGRAPH_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts",
"NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS": (
"langchain_community.chains.graph_qa.prompts"
),
"NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE": (
"langchain_community.chains.graph_qa.prompts"
),
"NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE": (
"langchain_community.chains.graph_qa.prompts"
),
"NGQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_GENERATION_SELECT_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_GENERATION_UPDATE_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_INTENT_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
"SPARQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AQL_FIX_TEMPLATE",
"AQL_GENERATION_TEMPLATE",
"AQL_QA_TEMPLATE",
"CYPHER_GENERATION_TEMPLATE",
"CYPHER_QA_TEMPLATE",
"GRAPHDB_QA_TEMPLATE",
"GRAPHDB_SPARQL_FIX_TEMPLATE",
"GRAPHDB_SPARQL_GENERATION_TEMPLATE",
"GREMLIN_GENERATION_TEMPLATE",
"KUZU_EXTRA_INSTRUCTIONS",
"KUZU_GENERATION_TEMPLATE",
"NEBULAGRAPH_EXTRA_INSTRUCTIONS",
"NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS",
"NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE",
"NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE",
"NGQL_GENERATION_TEMPLATE",
"SPARQL_GENERATION_SELECT_TEMPLATE",
"SPARQL_GENERATION_UPDATE_TEMPLATE",
"SPARQL_INTENT_TEMPLATE",
"SPARQL_QA_TEMPLATE",
"CYPHER_QA_PROMPT",
"CYPHER_GENERATION_PROMPT",
]
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_path,
config_name="20220301.frr",
hash=dataset_module.hash,
)
ds = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_path,
config_name="20220301.frr",
hash=dataset_module.hash,
)
ds = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
|
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
if subset not in ["train", "valid", "test"]:
raise ValueError("`subset` must be one of ['train', 'valid', 'test']")
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def __len__(self):
return len(self.data)
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
wav_path = os.path.join(self._path, "wavs", "speakers", speaker_id, f"{file_name}.wav")
wav, sample_rate = torchaudio.load(wav_path)
return wav, sample_rate, file_name, speaker_id, transcription, action, obj, location
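# Hedged usage sketch: the root path below is a placeholder, and the Fluent Speech
# Commands data must already be extracted under <root>/fluent_speech_commands_dataset.
if __name__ == "__main__":
    dataset = FluentSpeechCommands("/path/to/root", subset="valid")
    waveform, sample_rate, file_name, speaker_id, transcription, action, obj, location = dataset[0]
    print(file_name, sample_rate, action, obj, location)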
|
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
assert subset in ["train", "valid", "test"], "`subset` must be one of ['train', 'valid', 'test']"
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def __len__(self):
return len(self.data)
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
wav_path = os.path.join(self._path, "wavs", "speakers", speaker_id, f"{file_name}.wav")
wav, sample_rate = torchaudio.load(wav_path)
return wav, sample_rate, file_name, speaker_id, transcription, action, obj, location
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
    def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
        # Concatenate the per-input embeddings into one (batch, dim) tensor so the
        # threshold mask and the mean over the batch dimension operate on a tensor
        # rather than on a Python list.
        embeddings = torch.cat(embeddings)
        if self.threshold is not None:
            l0_norm = (embeddings != 0).sum(dim=1)
            mask = (l0_norm > self.threshold).float()
            embeddings = embeddings * mask.unsqueeze(1)
        return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor:
if embeddings_type == "query":
embeddings_to_use = embeddings[0] # (batch_size, embedding_dim)
else:
embeddings_to_use = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim)
if self.threshold is not None:
l0_norm = (embeddings_to_use != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings_to_use = embeddings_to_use * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings_to_use, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.tedlium3.lightning import TEDLIUM3RNNTModule
class MockTEDLIUM:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.tedlium3.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("torchaudio.datasets.TEDLIUM", new=MockTEDLIUM), patch(
"asr.emformer_rnnt.tedlium3.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield TEDLIUM3RNNTModule(
tedlium_path="tedlium_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestTEDLIUM3RNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
from .utils import MockSentencePieceProcessor, MockCustomDataset, MockDataloader
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.tedlium3.lightning import TEDLIUM3RNNTModule
class MockTEDLIUM:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.tedlium3.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("torchaudio.datasets.TEDLIUM", new=MockTEDLIUM), patch(
"asr.emformer_rnnt.tedlium3.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield TEDLIUM3RNNTModule(
tedlium_path="tedlium_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestTEDLIUM3RNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_new_project_parser(parser=None):
"""Set the parser for `new`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'name', type=str, help='The name of the project', default='hello-jina'
)
parser.add_argument(
'--type',
type=str,
help='The type of project to be created (either flow or deployment)',
default='flow',
)
return parser
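# Illustrative usage sketch (not part of the original module): build the parser
# and parse a sample command line. The project name and --type value below are
# made-up placeholders.
if __name__ == '__main__':
    parser = set_new_project_parser()
    args = parser.parse_args(['my-jina-project', '--type', 'deployment'])
    print(args.name, args.type)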
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_new_project_parser(parser=None):
"""Set the parser for `new`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'name', type=str, help='The name of the project', default='hello-jina'
)
parser.add_argument(
'--type', type=str, help='The type of project to be created (either flow or deployment)', default='flow'
)
return parser
|
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import TorchaudioTestCase
from torchaudio_unittest.datasets.librispeech_test_impl import LibriSpeechTestMixin
class TestLibriSpeech(LibriSpeechTestMixin, TorchaudioTestCase):
librispeech_cls = librispeech.LIBRISPEECH
|
import os
from pathlib import Path
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def get_mock_dataset(root_dir):
"""
    root_dir: path to the directory where the mocked dataset is created
"""
mocked_data = []
dataset_dir = os.path.join(root_dir, librispeech.FOLDER_IN_ARCHIVE, librispeech.URL)
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz
seed = 0
for speaker_id in range(5):
speaker_path = os.path.join(dataset_dir, str(speaker_id))
os.makedirs(speaker_path, exist_ok=True)
for chapter_id in range(3):
chapter_path = os.path.join(speaker_path, str(chapter_id))
os.makedirs(chapter_path, exist_ok=True)
trans_content = []
for utterance_id in range(10):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.wav"
path = os.path.join(chapter_path, filename)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
trans_content.append(f"{speaker_id}-{chapter_id}-{utterance_id:04d} {transcript}")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, chapter_id, utterance_id)
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(chapter_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
class TestLibriSpeech(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
@classmethod
def tearDownClass(cls):
# In case of test failure
librispeech.LIBRISPEECH._ext_audio = ".flac"
def _test_librispeech(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert chapter_id == self.samples[i][4]
assert utterance_id == self.samples[i][5]
num_samples += 1
assert num_samples == len(self.samples)
librispeech.LIBRISPEECH._ext_audio = ".flac"
def test_librispeech_str(self):
librispeech.LIBRISPEECH._ext_audio = ".wav"
dataset = librispeech.LIBRISPEECH(self.root_dir)
self._test_librispeech(dataset)
def test_librispeech_path(self):
librispeech.LIBRISPEECH._ext_audio = ".wav"
dataset = librispeech.LIBRISPEECH(Path(self.root_dir))
self._test_librispeech(dataset)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (DetrTransformerDecoder,
DetrTransformerEncoder,
Transformer)
def test_detr_transformer_decoder_encoder_layer():
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=(
'norm',
'self_attn',
'norm',
'cross_attn',
'norm',
'ffn',
))))
assert DetrTransformerDecoder(**config).layers[0].pre_norm
assert len(DetrTransformerDecoder(**config).layers) == 6
DetrTransformerDecoder(**config)
with pytest.raises(AssertionError):
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=[
dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))
] * 5))
DetrTransformerDecoder(**config)
config = ConfigDict(
dict(
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))))
with pytest.raises(AssertionError):
        # this operation_order is invalid for DetrTransformerEncoder, so an AssertionError is expected
DetrTransformerEncoder(**config)
def test_transformer():
config = ConfigDict(
dict(
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')),
)))
transformer = Transformer(**config)
transformer.init_weights()
|
import pytest
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (DetrTransformerDecoder,
DetrTransformerEncoder,
Transformer)
def test_detr_transformer_decoder_encoder_layer():
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=(
'norm',
'self_attn',
'norm',
'cross_attn',
'norm',
'ffn',
))))
assert DetrTransformerDecoder(**config).layers[0].pre_norm
assert len(DetrTransformerDecoder(**config).layers) == 6
DetrTransformerDecoder(**config)
with pytest.raises(AssertionError):
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=[
dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))
] * 5))
DetrTransformerDecoder(**config)
config = ConfigDict(
dict(
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))))
with pytest.raises(AssertionError):
        # this operation_order is invalid for DetrTransformerEncoder, so an AssertionError is expected
DetrTransformerEncoder(**config)
def test_transformer():
config = ConfigDict(
dict(
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')),
)))
transformer = Transformer(**config)
transformer.init_weights()
|
"""Tool for the OpenWeatherMap API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class OpenWeatherMapQueryRun(BaseTool):
"""Tool that queries the OpenWeatherMap API."""
api_wrapper: OpenWeatherMapAPIWrapper = Field(
default_factory=OpenWeatherMapAPIWrapper
)
name: str = "open_weather_map"
description: str = (
"A wrapper around OpenWeatherMap API. "
"Useful for fetching current weather information for a specified location. "
"Input should be a location string (e.g. London,GB)."
)
def _run(
self, location: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the OpenWeatherMap tool."""
return self.api_wrapper.run(location)
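# Illustrative usage sketch (not part of the original module). It assumes the
# `pyowm` package is installed and the OPENWEATHERMAP_API_KEY environment
# variable is set; "London,GB" is only a placeholder location.
if __name__ == "__main__":
    import os

    if os.environ.get("OPENWEATHERMAP_API_KEY"):
        tool = OpenWeatherMapQueryRun()
        print(tool.run("London,GB"))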
|
"""Tool for the OpenWeatherMap API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class OpenWeatherMapQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries the OpenWeatherMap API."""
api_wrapper: OpenWeatherMapAPIWrapper = Field(
default_factory=OpenWeatherMapAPIWrapper # type: ignore[arg-type]
)
name: str = "open_weather_map"
description: str = (
"A wrapper around OpenWeatherMap API. "
"Useful for fetching current weather information for a specified location. "
"Input should be a location string (e.g. London,GB)."
)
def _run(
self, location: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the OpenWeatherMap tool."""
return self.api_wrapper.run(location)
|
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT_SECONDS = 900
REQUEST_ORIGIN = "llamaindex"
class AgentQLWebReader(BasePydanticReader):
"""
    Scrape a URL, with or without an AgentQL query, and return documents in JSON format.
Args:
api_key (str): The AgentQL API key, get one at https://dev.agentql.com
params (dict): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
"""
api_key: str
params: Optional[dict]
def __init__(
self,
api_key: str,
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, params=params)
def load_data(
self, url: str, query: Optional[str] = None, prompt: Optional[str] = None
) -> List[Document]:
"""
        Scrape data from the given URL.
Args:
url (str): URL to scrape or crawl.
query (Optional[str]): AgentQL query used to specify the scraped data.
prompt (Optional[str]): Natural language description of the data you want to scrape.
Either query or prompt must be provided.
params (Optional[dict]): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
Returns:
List[Document]: List of documents.
"""
payload = {"url": url, "query": query, "prompt": prompt, "params": self.params}
headers = {
"X-API-Key": f"{self.api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
try:
response = httpx.post(
QUERY_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=API_TIMEOUT_SECONDS,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
response = e.response
if response.status_code in [401, 403]:
raise ValueError(
"Please, provide a valid API Key. You can create one at https://dev.agentql.com."
) from e
else:
try:
error_json = response.json()
msg = (
error_json["error_info"]
if "error_info" in error_json
else error_json["detail"]
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
else:
json = response.json()
return [Document(text=str(json["data"]), metadata=json["metadata"])]
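# Illustrative usage sketch (not part of the original module). The API key and
# the AgentQL query below are placeholders; a real key can be created at
# https://dev.agentql.com.
if __name__ == "__main__":
    reader = AgentQLWebReader(api_key="YOUR_AGENTQL_API_KEY")
    docs = reader.load_data(url="https://example.com", query="{ page_title }")
    print(docs[0].text)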
|
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT_SECONDS = 900
class AgentQLWebReader(BasePydanticReader):
"""
    Scrape a URL, with or without an AgentQL query, and return documents in JSON format.
Args:
api_key (str): The AgentQL API key, get one at https://dev.agentql.com
params (dict): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
"""
api_key: str
params: Optional[dict]
def __init__(
self,
api_key: str,
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, params=params)
def load_data(
self, url: str, query: Optional[str] = None, prompt: Optional[str] = None
) -> List[Document]:
"""
        Scrape data from the given URL.
Args:
url (str): URL to scrape or crawl.
query (Optional[str]): AgentQL query used to specify the scraped data.
prompt (Optional[str]): Natural language description of the data you want to scrape.
Either query or prompt must be provided.
params (Optional[dict]): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
Returns:
List[Document]: List of documents.
"""
payload = {"url": url, "query": query, "prompt": prompt, "params": self.params}
headers = {"X-API-Key": f"{self.api_key}", "Content-Type": "application/json"}
try:
response = httpx.post(
QUERY_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=API_TIMEOUT_SECONDS,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
response = e.response
if response.status_code in [401, 403]:
raise ValueError(
"Please, provide a valid API Key. You can create one at https://dev.agentql.com."
) from e
else:
try:
error_json = response.json()
msg = (
error_json["error_info"]
if "error_info" in error_json
else error_json["detail"]
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
else:
json = response.json()
return [Document(text=str(json["data"]), metadata=json["metadata"])]
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments` which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset column names to Router routes, like "query" or "document". This is used to specify
which Router submodule to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of column names to routes.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments` which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.18.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.17.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import pytest as pytest
from langchain_core.documents import Document
from langchain.retrievers.multi_query import LineListOutputParser, _unique_documents
@pytest.mark.parametrize(
"documents,expected",
[
([], []),
([Document(page_content="foo")], [Document(page_content="foo")]),
([Document(page_content="foo")] * 2, [Document(page_content="foo")]),
(
[Document(page_content="foo", metadata={"bar": "baz"})] * 2,
[Document(page_content="foo", metadata={"bar": "baz"})],
),
(
[Document(page_content="foo", metadata={"bar": [1, 2]})] * 2,
[Document(page_content="foo", metadata={"bar": [1, 2]})],
),
(
[Document(page_content="foo", metadata={"bar": {1, 2}})] * 2,
[Document(page_content="foo", metadata={"bar": {1, 2}})],
),
(
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
),
],
)
def test__unique_documents(documents: list[Document], expected: list[Document]) -> None:
assert _unique_documents(documents) == expected
@pytest.mark.parametrize(
"text,expected",
[
("foo\nbar\nbaz", ["foo", "bar", "baz"]),
("foo\nbar\nbaz\n", ["foo", "bar", "baz"]),
("foo\n\nbar", ["foo", "bar"]),
],
)
def test_line_list_output_parser(text: str, expected: list[str]) -> None:
parser = LineListOutputParser()
assert parser.parse(text) == expected
|
from typing import List
import pytest as pytest
from langchain_core.documents import Document
from langchain.retrievers.multi_query import LineListOutputParser, _unique_documents
@pytest.mark.parametrize(
"documents,expected",
[
([], []),
([Document(page_content="foo")], [Document(page_content="foo")]),
([Document(page_content="foo")] * 2, [Document(page_content="foo")]),
(
[Document(page_content="foo", metadata={"bar": "baz"})] * 2,
[Document(page_content="foo", metadata={"bar": "baz"})],
),
(
[Document(page_content="foo", metadata={"bar": [1, 2]})] * 2,
[Document(page_content="foo", metadata={"bar": [1, 2]})],
),
(
[Document(page_content="foo", metadata={"bar": {1, 2}})] * 2,
[Document(page_content="foo", metadata={"bar": {1, 2}})],
),
(
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
),
],
)
def test__unique_documents(documents: List[Document], expected: List[Document]) -> None:
assert _unique_documents(documents) == expected
@pytest.mark.parametrize(
"text,expected",
[
("foo\nbar\nbaz", ["foo", "bar", "baz"]),
("foo\nbar\nbaz\n", ["foo", "bar", "baz"]),
("foo\n\nbar", ["foo", "bar"]),
],
)
def test_line_list_output_parser(text: str, expected: List[str]) -> None:
parser = LineListOutputParser()
assert parser.parse(text) == expected
|
# coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import platform
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate env variable is set",
)
def test_cpu_and_gpu_work():
# If compiled appropriately, the same installation will support both GPU and CPU.
X, y = load_breast_cancer(return_X_y=True)
data = lgb.Dataset(X, y)
params_cpu = {"verbosity": -1, "num_leaves": 31, "objective": "binary", "device": "cpu"}
cpu_bst = lgb.train(params_cpu, data, num_boost_round=10)
cpu_score = log_loss(y, cpu_bst.predict(X))
params_gpu = params_cpu.copy()
params_gpu["device"] = "gpu"
# Double-precision floats are only supported on x86_64 with PoCL
params_gpu["gpu_use_dp"] = platform.machine() == "x86_64"
gpu_bst = lgb.train(params_gpu, data, num_boost_round=10)
gpu_score = log_loss(y, gpu_bst.predict(X))
rel = 1e-6 if params_gpu["gpu_use_dp"] else 1e-4
assert cpu_score == pytest.approx(gpu_score, rel=rel)
assert gpu_score < 0.242
|
# coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import platform
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate env variable is set",
)
def test_cpu_and_gpu_work():
# If compiled appropriately, the same installation will support both GPU and CPU.
X, y = load_breast_cancer(return_X_y=True)
data = lgb.Dataset(X, y)
params_cpu = {"verbosity": -1, "num_leaves": 31, "objective": "binary", "device": "cpu"}
cpu_bst = lgb.train(params_cpu, data, num_boost_round=10)
cpu_score = log_loss(y, cpu_bst.predict(X))
params_gpu = params_cpu.copy()
params_gpu["device"] = "gpu"
# Double-precision floats are only supported on x86_64 with PoCL
params_gpu["gpu_use_dp"] = (platform.machine() == "x86_64")
gpu_bst = lgb.train(params_gpu, data, num_boost_round=10)
gpu_score = log_loss(y, gpu_bst.predict(X))
rel = 1e-6 if params_gpu["gpu_use_dp"] else 1e-4
assert cpu_score == pytest.approx(gpu_score, rel=rel)
assert gpu_score < 0.242
|
"""A tracer that runs evaluators over completed runs."""
from langchain_core.tracers.evaluation import (
EvaluatorCallbackHandler,
wait_for_all_evaluators,
)
__all__ = ["EvaluatorCallbackHandler", "wait_for_all_evaluators"]
|
"""A tracer that runs evaluators over completed runs."""
from langchain_core.tracers.evaluation import (
EvaluatorCallbackHandler,
wait_for_all_evaluators,
)
__all__ = ["wait_for_all_evaluators", "EvaluatorCallbackHandler"]
|
# CoSENTLoss must be imported before AnglELoss
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
from .MarginMSELoss import MarginMSELoss
from .MatryoshkaLoss import MatryoshkaLoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MSELoss import MSELoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import SiameseDistanceMetric, ContrastiveLoss
from .ContrastiveTensionLoss import (
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
ContrastiveTensionDataLoader,
)
from .CoSENTLoss import CoSENTLoss
from .AnglELoss import AnglELoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
# Triplet losses
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .BatchAllTripletLoss import BatchAllTripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
from __future__ import annotations
import torch
from sentence_transformers.models.Module import Module
class SpladePooling(Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM).
2. Applies a sparse transformation using an activation function followed by log1p (i.e., log(1 + activation(MLM_logits))).
3. Applies a pooling strategy `max` or `sum` to produce sparse embeddings.
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): Pooling method across token dimensions.
Choices:
- `sum`: Sum pooling (used in original SPLADE see https://arxiv.org/pdf/2107.05720).
- `max`: Max pooling (used in SPLADEv2 and later models see https://arxiv.org/pdf/2109.10086 or https://arxiv.org/pdf/2205.04733).
activation_function (str): Activation function applied before log1p transformation.
Choices:
- `relu`: ReLU activation (standard in all Splade models).
- `log1p_relu`: log(1 + ReLU(x)) variant used in Opensearch Splade models see arxiv.org/pdf/2504.14839.
word_embedding_dimension (int, optional): Dimensionality of the output embeddings (if needed).
"""
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
config_keys: list[str] = ["pooling_strategy", "activation_function", "word_embedding_dimension"]
def __init__(
self, pooling_strategy: str = "max", activation_function="relu", word_embedding_dimension: int = None
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.activation_function = activation_function
if activation_function not in self.SPLADE_ACTIVATION:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features with 'token_embeddings' key as MLM logits.
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
if self.activation_function == "relu":
splade_scores = torch.log1p(torch.relu(mlm_logits))
elif self.activation_function == "log1p_relu":
splade_scores = torch.log1p(torch.log1p(torch.relu(mlm_logits)))
else:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
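# Illustrative sketch of the pooling mechanism described above (not part of the
# original module): random tensors stand in for real MLM logits, and the
# vocabulary size of 30522 is only an example.
if __name__ == "__main__":
    pooling = SpladePooling(pooling_strategy="max", activation_function="relu")
    fake_mlm_logits = torch.randn(2, 10, 30522)  # (batch_size, seq_length, vocab_size)
    output = pooling({"token_embeddings": fake_mlm_logits})
    sparse_embeddings = output["sentence_embedding"]  # shape: (2, 30522)
    print(sparse_embeddings.shape, (sparse_embeddings == 0).float().mean())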
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM).
2. Applies a sparse transformation using an activation function followed by log1p (i.e., log(1 + activation(MLM_logits))).
3. Applies a pooling strategy `max` or `sum` to produce sparse embeddings.
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): Pooling method across token dimensions.
Choices:
- `sum`: Sum pooling (used in original SPLADE see https://arxiv.org/pdf/2107.05720).
- `max`: Max pooling (used in SPLADEv2 and later models see https://arxiv.org/pdf/2109.10086 or https://arxiv.org/pdf/2205.04733).
activation_function (str): Activation function applied before log1p transformation.
Choices:
- `relu`: ReLU activation (standard in all Splade models).
- `log1p_relu`: log(1 + ReLU(x)) variant used in Opensearch Splade models see arxiv.org/pdf/2504.14839.
word_embedding_dimension (int, optional): Dimensionality of the output embeddings (if needed).
"""
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
def __init__(
self, pooling_strategy: str = "max", activation_function="relu", word_embedding_dimension: int = None
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.activation_function = activation_function
if activation_function not in self.SPLADE_ACTIVATION:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
self.config_keys = ["pooling_strategy", "activation_function", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features with 'token_embeddings' key as MLM logits.
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
if self.activation_function == "relu":
splade_scores = torch.log1p(torch.relu(mlm_logits))
elif self.activation_function == "log1p_relu":
splade_scores = torch.log1p(torch.log1p(torch.relu(mlm_logits)))
else:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but we hope to see multilingual sparse encoders on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity: Active Dimensions: 113.6, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but we hope to see multilingual sparse encoders on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity Stats: Row Non-Zero Mean: 113.6150016784668, Row Sparsity Mean: 0.9962776005268097
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
HunyuanDiT2DControlNetModel,
HunyuanDiT2DMultiControlNetModel,
)
from .controlnet_sana import SanaControlNetModel
from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
from .controlnet_sparsectrl import (
SparseControlNetConditioningEmbedding,
SparseControlNetModel,
SparseControlNetOutput,
)
from .controlnet_union import ControlNetUnionModel
from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
from .multicontrolnet import MultiControlNetModel
from .multicontrolnet_union import MultiControlNetUnionModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
|
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
HunyuanDiT2DControlNetModel,
HunyuanDiT2DMultiControlNetModel,
)
from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
from .controlnet_sparsectrl import (
SparseControlNetConditioningEmbedding,
SparseControlNetModel,
SparseControlNetOutput,
)
from .controlnet_union import ControlNetUnionModel
from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
from .multicontrolnet import MultiControlNetModel
from .multicontrolnet_union import MultiControlNetUnionModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
|
from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal["unreal_speech"], Literal["api_key"]
] = CredentialsField(
provider="unreal_speech",
supported_credential_types={"api_key"},
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
from typing import Any, Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal["unreal_speech"], Literal["api_key"]
] = CredentialsField(
provider="unreal_speech",
supported_credential_types={"api_key"},
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class SuperPointConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SuperPointForKeypointDetection`]. It is used to instantiate a
SuperPoint model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SuperPoint
[magic-leap-community/superpoint](https://huggingface.co/magic-leap-community/superpoint) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
encoder_hidden_sizes (`List`, *optional*, defaults to `[64, 64, 128, 128]`):
The number of channels in each convolutional layer in the encoder.
decoder_hidden_size (`int`, *optional*, defaults to 256): The hidden size of the decoder.
keypoint_decoder_dim (`int`, *optional*, defaults to 65): The output dimension of the keypoint decoder.
descriptor_decoder_dim (`int`, *optional*, defaults to 256): The output dimension of the descriptor decoder.
keypoint_threshold (`float`, *optional*, defaults to 0.005):
The threshold to use for extracting keypoints.
max_keypoints (`int`, *optional*, defaults to -1):
The maximum number of keypoints to extract. If `-1`, will extract all keypoints.
nms_radius (`int`, *optional*, defaults to 4):
The radius for non-maximum suppression.
border_removal_distance (`int`, *optional*, defaults to 4):
The distance from the border to remove keypoints.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import SuperPointConfig, SuperPointForKeypointDetection
>>> # Initializing a SuperPoint superpoint style configuration
>>> configuration = SuperPointConfig()
>>> # Initializing a model from the superpoint style configuration
>>> model = SuperPointForKeypointDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "superpoint"
def __init__(
self,
encoder_hidden_sizes: list[int] = [64, 64, 128, 128],
decoder_hidden_size: int = 256,
keypoint_decoder_dim: int = 65,
descriptor_decoder_dim: int = 256,
keypoint_threshold: float = 0.005,
max_keypoints: int = -1,
nms_radius: int = 4,
border_removal_distance: int = 4,
initializer_range=0.02,
**kwargs,
):
self.encoder_hidden_sizes = encoder_hidden_sizes
self.decoder_hidden_size = decoder_hidden_size
self.keypoint_decoder_dim = keypoint_decoder_dim
self.descriptor_decoder_dim = descriptor_decoder_dim
self.keypoint_threshold = keypoint_threshold
self.max_keypoints = max_keypoints
self.nms_radius = nms_radius
self.border_removal_distance = border_removal_distance
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["SuperPointConfig"]
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class SuperPointConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SuperPointForKeypointDetection`]. It is used to instantiate a
SuperPoint model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SuperPoint
[magic-leap-community/superpoint](https://huggingface.co/magic-leap-community/superpoint) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
encoder_hidden_sizes (`List`, *optional*, defaults to `[64, 64, 128, 128]`):
The number of channels in each convolutional layer in the encoder.
decoder_hidden_size (`int`, *optional*, defaults to 256): The hidden size of the decoder.
keypoint_decoder_dim (`int`, *optional*, defaults to 65): The output dimension of the keypoint decoder.
descriptor_decoder_dim (`int`, *optional*, defaults to 256): The output dimension of the descriptor decoder.
keypoint_threshold (`float`, *optional*, defaults to 0.005):
The threshold to use for extracting keypoints.
max_keypoints (`int`, *optional*, defaults to -1):
The maximum number of keypoints to extract. If `-1`, will extract all keypoints.
nms_radius (`int`, *optional*, defaults to 4):
The radius for non-maximum suppression.
border_removal_distance (`int`, *optional*, defaults to 4):
The distance from the border to remove keypoints.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import SuperPointConfig, SuperPointForKeypointDetection
>>> # Initializing a SuperPoint superpoint style configuration
>>> configuration = SuperPointConfig()
>>> # Initializing a model from the superpoint style configuration
>>> model = SuperPointForKeypointDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "superpoint"
def __init__(
self,
encoder_hidden_sizes: List[int] = [64, 64, 128, 128],
decoder_hidden_size: int = 256,
keypoint_decoder_dim: int = 65,
descriptor_decoder_dim: int = 256,
keypoint_threshold: float = 0.005,
max_keypoints: int = -1,
nms_radius: int = 4,
border_removal_distance: int = 4,
initializer_range=0.02,
**kwargs,
):
self.encoder_hidden_sizes = encoder_hidden_sizes
self.decoder_hidden_size = decoder_hidden_size
self.keypoint_decoder_dim = keypoint_decoder_dim
self.descriptor_decoder_dim = descriptor_decoder_dim
self.keypoint_threshold = keypoint_threshold
self.max_keypoints = max_keypoints
self.nms_radius = nms_radius
self.border_removal_distance = border_removal_distance
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["SuperPointConfig"]
|
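# A minimal usage sketch, assuming only the generic PretrainedConfig API shown
# above: customize a SuperPointConfig and round-trip it through
# save_pretrained / from_pretrained. The target directory is hypothetical.
from transformers import SuperPointConfig

config = SuperPointConfig(max_keypoints=1024, nms_radius=8, keypoint_threshold=0.01)
config.save_pretrained("./superpoint-custom")             # writes config.json
reloaded = SuperPointConfig.from_pretrained("./superpoint-custom")
assert reloaded.max_keypoints == 1024 and reloaded.nms_radius == 8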
"""Sample a fraction of the Spider dataset."""
import argparse
import json
import os
import random
import shutil
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create a sampled version of the Spider dataset."
)
parser.add_argument(
"--input",
type=str,
required=True,
help="Path to the Spider dataset directory. "
"This directory should contain the train.json, dev.json, "
"and databases, "
"downloaded from https://yale-lily.github.io/spider.",
)
parser.add_argument(
"--output",
type=str,
required=True,
help="Path to the output directory of the sampled benchmark.",
)
parser.add_argument(
"--sample-factor",
type=float,
required=True,
help="The sample factor to apply to sample a fraction "
"of examples in both the train and dev datasets.",
)
parser.add_argument("--seed", type=int, default=0, help="Random seed.")
args = parser.parse_args()
# Create the output directory if it does not exist.
if not os.path.exists(args.output):
os.makedirs(args.output)
# Load the Spider dataset from the input directory.
with open(os.path.join(args.input, "train_spider.json")) as f:
train_spider = json.load(f)
with open(os.path.join(args.input, "train_others.json")) as f:
train_others = json.load(f)
with open(os.path.join(args.input, "dev.json")) as f:
dev = json.load(f)
# Randomly sample (without replacement) the indices using the sample factor.
random.seed(args.seed)
train_spider_indices = list(range(len(train_spider)))
train_others_indices = list(range(len(train_others)))
dev_indices = list(range(len(dev)))
    train_spider_indices = random.sample(
train_spider_indices, k=int(args.sample_factor * len(train_spider_indices))
)
    train_others_indices = random.sample(
train_others_indices, k=int(args.sample_factor * len(train_others_indices))
)
    dev_indices = random.sample(
dev_indices, k=int(args.sample_factor * len(dev_indices))
)
# Sort the indices to ensure same ordering as the original sql files.
train_spider_indices.sort()
train_others_indices.sort()
dev_indices.sort()
# Write the sampled datasets to the output directory.
with open(os.path.join(args.output, "train_spider.json"), "w") as f:
json.dump([train_spider[i] for i in train_spider_indices], f, indent=2)
with open(os.path.join(args.output, "train_others.json"), "w") as f:
json.dump([train_others[i] for i in train_others_indices], f, indent=2)
with open(os.path.join(args.output, "dev.json"), "w") as f:
json.dump([dev[i] for i in dev_indices], f, indent=2)
# Write the sql files to the output directory.
with open(os.path.join(args.output, "train_gold.sql"), "w") as f:
for i in train_spider_indices:
f.write(
train_spider[i]["query"].replace("\t", " ")
+ "\t"
+ train_spider[i]["db_id"]
+ "\n"
)
for i in train_others_indices:
f.write(
train_others[i]["query"].replace("\t", " ")
+ "\t"
+ train_others[i]["db_id"]
+ "\n"
)
with open(os.path.join(args.output, "dev_gold.sql"), "w") as f:
for i in dev_indices:
f.write(dev[i]["query"] + "\t" + dev[i]["db_id"] + "\n")
# Copy the database to the output directory.
shutil.copytree(
os.path.join(args.input, "database"),
os.path.join(args.output, "database"),
dirs_exist_ok=True,
)
# Copy the tables.json file to the output directory.
shutil.copyfile(
os.path.join(args.input, "tables.json"),
os.path.join(args.output, "tables.json"),
)
# Print results.
print(f"Sampled {len(train_spider_indices)} examples from train_spider.json.")
print(f"Sampled {len(train_others_indices)} examples from train_others.json.")
print(f"Sampled {len(dev_indices)} examples from dev.json.")
print(f"All files written to {args.output}.")
|
"""Sample a fraction of the Spider dataset."""
import argparse
import json
import os
import random
import shutil
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create a sampled version of the Spider dataset."
)
parser.add_argument(
"--input",
type=str,
required=True,
help="Path to the Spider dataset directory. "
"This directory should contain the train.json, dev.json, "
"and databases, "
"downloaded from https://yale-lily.github.io/spider.",
)
parser.add_argument(
"--output",
type=str,
required=True,
help="Path to the output directory of the sampled benchmark.",
)
parser.add_argument(
"--sample-factor",
type=float,
required=True,
help="The sample factor to apply to sample a fraction "
"of examples in both the train and dev datasets.",
)
parser.add_argument("--seed", type=int, default=0, help="Random seed.")
args = parser.parse_args()
# Create the output directory if it does not exist.
if not os.path.exists(args.output):
os.makedirs(args.output)
# Load the Spider dataset from the input directory.
with open(os.path.join(args.input, "train_spider.json")) as f:
train_spider = json.load(f)
with open(os.path.join(args.input, "train_others.json")) as f:
train_others = json.load(f)
with open(os.path.join(args.input, "dev.json")) as f:
dev = json.load(f)
# Randomly sample (without replacement) the indices using the sample factor.
random.seed(args.seed)
train_spider_indices = list(range(len(train_spider)))
train_others_indices = list(range(len(train_others)))
dev_indices = list(range(len(dev)))
    train_spider_indices = random.sample(
train_spider_indices, k=int(args.sample_factor * len(train_spider_indices))
)
    train_others_indices = random.sample(
train_others_indices, k=int(args.sample_factor * len(train_others_indices))
)
    dev_indices = random.sample(
dev_indices, k=int(args.sample_factor * len(dev_indices))
)
# Sort the indices to ensure same ordering as the original sql files.
train_spider_indices.sort()
train_others_indices.sort()
dev_indices.sort()
# Write the sampled datasets to the output directory.
with open(os.path.join(args.output, "train_spider.json"), "w") as f:
json.dump([train_spider[i] for i in train_spider_indices], f, indent=2)
with open(os.path.join(args.output, "train_others.json"), "w") as f:
json.dump([train_others[i] for i in train_others_indices], f, indent=2)
with open(os.path.join(args.output, "dev.json"), "w") as f:
json.dump([dev[i] for i in dev_indices], f, indent=2)
# Write the sql files to the output directory.
with open(os.path.join(args.output, "train_gold.sql"), "w") as f:
for i in train_spider_indices:
f.write(
train_spider[i]["query"].replace("\t", " ")
+ "\t"
+ train_spider[i]["db_id"]
+ "\n"
)
for i in train_others_indices:
f.write(
train_others[i]["query"].replace("\t", " ")
+ "\t"
+ train_others[i]["db_id"]
+ "\n"
)
with open(os.path.join(args.output, "dev_gold.sql"), "w") as f:
for i in dev_indices:
f.write(dev[i]["query"] + "\t" + dev[i]["db_id"] + "\n")
# Copy the database to the output directory.
shutil.copytree(
os.path.join(args.input, "database"),
os.path.join(args.output, "database"),
dirs_exist_ok=True,
)
# Copy the tables.json file to the output directory.
shutil.copyfile(
os.path.join(args.input, "tables.json"),
os.path.join(args.output, "tables.json"),
)
# Print results.
print(f"Sampled {len(train_spider_indices)} examples from train_spider.json.")
print(f"Sampled {len(train_others_indices)} examples from train_others.json.")
print(f"Sampled {len(dev_indices)} examples from dev.json.")
print(f"All files written to {args.output}.")
|
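# A small stand-alone sketch of the two stdlib sampling primitives relevant to
# the script above: random.choices draws with replacement (duplicates
# possible), while random.sample draws without replacement, which is what the
# "without replacement" comment in the script asks for.
import random

random.seed(0)
population = list(range(10))

with_replacement = random.choices(population, k=5)     # may repeat indices
without_replacement = random.sample(population, k=5)   # all indices distinct

print(with_replacement)
print(without_replacement)
assert len(set(without_replacement)) == 5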
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
max_active_dims=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
max_active_dims=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.losses import CSRLoss
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
truncate_dim=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
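# A short inference sketch, assuming the checkpoint written by
# model.save(output_dir) in the scripts above and assuming SparseEncoder
# exposes the same encode/similarity interface as SentenceTransformer; the
# path and sentences are illustrative only.
from sentence_transformers import SparseEncoder

model = SparseEncoder(
    "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
)
sentences = ["It's nice weather outside today.", "It's so sunny."]
embeddings = model.encode(sentences)             # sparse sentence embeddings
print(model.similarity(embeddings, embeddings))  # pairwise similarity matrix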
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.anthropic import (
ChatAnthropic,
convert_messages_to_prompt_anthropic,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"convert_messages_to_prompt_anthropic": "langchain_community.chat_models.anthropic",
"ChatAnthropic": "langchain_community.chat_models.anthropic",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ChatAnthropic",
"convert_messages_to_prompt_anthropic",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.anthropic import (
ChatAnthropic,
convert_messages_to_prompt_anthropic,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"convert_messages_to_prompt_anthropic": "langchain_community.chat_models.anthropic",
"ChatAnthropic": "langchain_community.chat_models.anthropic",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"convert_messages_to_prompt_anthropic",
"ChatAnthropic",
]
|
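# The shims above rely on module-level __getattr__ (PEP 562). A generic,
# self-contained sketch of that pattern, independent of langchain's
# create_importer helper; the deprecated-name mapping is purely illustrative
# (collections.OrderedDict stands in for a relocated class).
import importlib
import warnings
from typing import Any

_DEPRECATED_LOOKUP = {"OrderedDict": "collections"}  # illustrative mapping


def __getattr__(name: str) -> Any:
    """Resolve deprecated names lazily and warn on first use."""
    if name in _DEPRECATED_LOOKUP:
        module = importlib.import_module(_DEPRECATED_LOOKUP[name])
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {_DEPRECATED_LOOKUP[name]} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")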
from typing import Iterable, Type
from docarray.array.abstract_array import AbstractDocumentArray
from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin
from docarray.document import AnyDocument, BaseDocument, BaseNode
class DocumentArray(
list,
ProtoArrayMixin,
GetAttributeArrayMixin,
AbstractDocumentArray,
BaseNode,
):
"""
    A DocumentArray is a list-like container of Documents of the same schema
:param docs: iterable of Document
"""
document_type: Type[BaseDocument] = AnyDocument
def __init__(self, docs: Iterable[BaseDocument]):
super().__init__(doc_ for doc_ in docs)
def __class_getitem__(cls, item: Type[BaseDocument]):
if not issubclass(item, BaseDocument):
raise ValueError(
f'DocumentArray[item] item should be a Document not a {item} '
)
class _DocumenArrayTyped(DocumentArray):
document_type: Type[BaseDocument] = item
for field in _DocumenArrayTyped.document_type.__fields__.keys():
def _property_generator(val: str):
return property(lambda self: self._get_documents_attribute(val))
setattr(_DocumenArrayTyped, field, _property_generator(field))
# this generates property on the fly based on the schema of the item
_DocumenArrayTyped.__name__ = f'DocumentArray[{item.__name__}]'
_DocumenArrayTyped.__qualname__ = f'DocumentArray[{item.__name__}]'
return _DocumenArrayTyped
|
from typing import Iterable, Type
from docarray.array.abstract_array import AbstractDocumentArray
from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin
from docarray.document import AnyDocument, BaseDocument, BaseNode
class DocumentArray(
list,
ProtoArrayMixin,
GetAttributeArrayMixin,
AbstractDocumentArray,
BaseNode,
):
"""
    A DocumentArray is a list-like container of Documents of the same schema
:param docs: iterable of Document
"""
document_type: Type[BaseDocument] = AnyDocument
def __init__(self, docs: Iterable[BaseDocument]):
super().__init__(doc_ for doc_ in docs)
def __class_getitem__(cls, item: Type[BaseDocument]):
if not issubclass(item, BaseDocument):
raise ValueError(
f'DocumentArray[item] item should be a Document not a {item} '
)
class _DocumenArrayTyped(DocumentArray):
document_type: Type[BaseDocument] = item
for field in _DocumenArrayTyped.document_type.__fields__.keys():
def _proprety_generator(val: str):
return property(lambda self: self._get_documents_attribute(val))
setattr(_DocumenArrayTyped, field, _proprety_generator(field))
# this generates property on the fly based on the schema of the item
_DocumenArrayTyped.__name__ = f'DocumentArray{item.__name__}'
return _DocumenArrayTyped
|
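# A small usage sketch against the in-progress docarray API shown above,
# assuming BaseDocument and DocumentArray are re-exported at the package root;
# the MyDoc schema is made up for illustration.
from docarray import BaseDocument, DocumentArray


class MyDoc(BaseDocument):
    title: str


docs = DocumentArray[MyDoc]([MyDoc(title='hello'), MyDoc(title='world')])

# __class_getitem__ generated a `title` property on the typed class, so the
# field can be gathered across all documents in one attribute access.
print(docs.title)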
import pytest
from langchain_core.memory import BaseMemory
from langchain.chains.conversation.memory import (
ConversationBufferMemory,
ConversationBufferWindowMemory,
ConversationSummaryMemory,
)
from langchain.memory import ReadOnlySharedMemory, SimpleMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_simple_memory() -> None:
"""Test SimpleMemory."""
memory = SimpleMemory(memories={"baz": "foo"})
output = memory.load_memory_variables({})
assert output == {"baz": "foo"}
assert memory.memory_variables == ["baz"]
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
],
)
def test_readonly_memory(memory: BaseMemory) -> None:
read_only_memory = ReadOnlySharedMemory(memory=memory)
memory.save_context({"input": "bar"}, {"output": "foo"})
assert read_only_memory.load_memory_variables({}) == memory.load_memory_variables(
{}
)
|
import pytest
from langchain_core.memory import BaseMemory
from langchain.chains.conversation.memory import (
ConversationBufferMemory,
ConversationBufferWindowMemory,
ConversationSummaryMemory,
)
from langchain.memory import ReadOnlySharedMemory, SimpleMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_simple_memory() -> None:
"""Test SimpleMemory."""
memory = SimpleMemory(memories={"baz": "foo"})
output = memory.load_memory_variables({})
assert output == {"baz": "foo"}
assert ["baz"] == memory.memory_variables
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
],
)
def test_readonly_memory(memory: BaseMemory) -> None:
read_only_memory = ReadOnlySharedMemory(memory=memory)
memory.save_context({"input": "bar"}, {"output": "foo"})
assert read_only_memory.load_memory_variables({}) == memory.load_memory_variables(
{}
)
|
from gravitasml.parser import Parser
from gravitasml.token import tokenize
from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class XMLParserBlock(Block):
class Input(BlockSchema):
input_xml: str = SchemaField(description="input xml to be parsed")
class Output(BlockSchema):
parsed_xml: dict = SchemaField(description="output parsed xml to dict")
error: str = SchemaField(description="Error in parsing")
def __init__(self):
super().__init__(
id="286380af-9529-4b55-8be0-1d7c854abdb5",
description="Parses XML using gravitasml to tokenize and coverts it to dict",
input_schema=XMLParserBlock.Input,
output_schema=XMLParserBlock.Output,
test_input={"input_xml": "<tag1><tag2>content</tag2></tag1>"},
test_output=[
("parsed_xml", {"tag1": {"tag2": "content"}}),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
tokens = tokenize(input_data.input_xml)
parser = Parser(tokens)
parsed_result = parser.parse()
yield "parsed_xml", parsed_result
except ValueError as val_e:
raise ValueError(f"Validation error for dict:{val_e}") from val_e
except SyntaxError as syn_e:
raise SyntaxError(f"Error in input xml syntax: {syn_e}") from syn_e
|
from gravitasml.parser import Parser
from gravitasml.token import tokenize
from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class XMLParserBlock(Block):
class Input(BlockSchema):
input_xml: str = SchemaField(description="input xml to be parsed")
class Output(BlockSchema):
parsed_xml: dict = SchemaField(description="output parsed xml to dict")
error: str = SchemaField(description="Error in parsing")
def __init__(self):
super().__init__(
id="286380af-9529-4b55-8be0-1d7c854abdb5",
description="Parses XML using gravitasml to tokenize and coverts it to dict",
input_schema=XMLParserBlock.Input,
output_schema=XMLParserBlock.Output,
test_input={"input_xml": "<tag1><tag2>content</tag2></tag1>"},
test_output=[
("parsed_xml", {"tag1": {"tag2": "content"}}),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
tokens = tokenize(input_data.input_xml)
parser = Parser(tokens)
parsed_result = parser.parse()
yield "parsed_xml", parsed_result
except ValueError as val_e:
raise ValueError(f"Validation error for dict:{val_e}") from val_e
except SyntaxError as syn_e:
raise SyntaxError(f"Error in input xml syntax: {syn_e}") from syn_e
|
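# The blocks above delegate the actual parsing to gravitasml; a minimal
# stand-alone sketch of that tokenize/parse flow, mirroring the calls made
# inside run() and the block's test_input/test_output.
from gravitasml.parser import Parser
from gravitasml.token import tokenize

xml = "<tag1><tag2>content</tag2></tag1>"
tokens = tokenize(xml)
parsed = Parser(tokens).parse()
print(parsed)  # expected: {'tag1': {'tag2': 'content'}}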
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
|
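# A hedged sketch of inspecting this LAD config with MMEngine before launching
# training; the config file path is hypothetical and assumes an MMDetection 3.x
# checkout where a file like this lives under configs/lad/.
from mmengine.config import Config

cfg = Config.fromfile('configs/lad/lad_r50_paa_r101_fpn_coco_1x.py')  # hypothetical path
print(cfg.model.type)                   # 'LAD'
print(cfg.model.bbox_head.type)         # 'LADHead'
print(cfg.train_dataloader.batch_size)  # 8
print(cfg.optim_wrapper.type)           # 'AmpOptimWrapper'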
import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Dict
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.base_doc.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.stacked.column_storage import ColumnStorageView
_console: Console = Console()
T = TypeVar('T', bound='BaseDoc')
class BaseDoc(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: Optional[ID] = Field(default_factory=lambda: ID(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
# `DocArrayResponse` is able to handle tensors by itself.
# Therefore, we stop FastAPI from doing any transformations
# on tensors by setting an identity function as a custom encoder.
json_encoders = {AbstractTensor: lambda x: x}
validate_assignment = True
@classmethod
def from_view(cls: Type[T], storage_view: 'ColumnStorageView') -> T:
doc = cls.__new__(cls)
object.__setattr__(doc, '__dict__', storage_view)
object.__setattr__(doc, '__fields_set__', set(storage_view.keys()))
doc._init_private_attributes()
return doc
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
        Access the nested python class defined in the schema. Could be useful for
        reconstructing a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self) -> str:
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self) -> None:
"""Displays the object in IPython as a summary"""
self.summary()
def is_view(self) -> bool:
from docarray.array.stacked.column_storage import ColumnStorageView
return isinstance(self.__dict__, ColumnStorageView)
def __getattr__(self, item) -> Any:
if item in self.__fields__.keys():
return self.__dict__[item]
else:
return super().__getattribute__(item)
def __setattr__(self, field, value) -> None:
if not self.is_view():
super().__setattr__(field, value)
else:
# here we first validate with pydantic
# Then we apply the value to the remote dict,
# and we change back the __dict__ value to the remote dict
dict_ref = self.__dict__
super().__setattr__(field, value)
for key, val in self.__dict__.items():
dict_ref[key] = val
object.__setattr__(self, '__dict__', dict_ref)
def _docarray_to_json_compatible(self) -> Dict:
"""
Convert itself into a json compatible object
:return: A dictionary of the BaseDoc object
"""
return self.dict()
|
import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_doc.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
if TYPE_CHECKING:
from docarray.array.stacked.column_storage import ColumnStorageView
_console: Console = Console()
T = TypeVar('T', bound='BaseDoc')
class BaseDoc(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: Optional[ID] = Field(default_factory=lambda: ID(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def from_view(cls: Type[T], storage_view: 'ColumnStorageView') -> T:
doc = cls.__new__(cls)
object.__setattr__(doc, '__dict__', storage_view)
object.__setattr__(doc, '__fields_set__', set(storage_view.keys()))
doc._init_private_attributes()
return doc
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
        Access the nested python class defined in the schema. Could be useful for
        reconstructing a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self) -> str:
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self) -> None:
"""Displays the object in IPython as a summary"""
self.summary()
def is_view(self) -> bool:
from docarray.array.stacked.column_storage import ColumnStorageView
return isinstance(self.__dict__, ColumnStorageView)
def __getattr__(self, item) -> Any:
if item in self.__fields__.keys():
return self.__dict__[item]
else:
return super().__getattribute__(item)
def __setattr__(self, field, value) -> None:
if not self.is_view():
super().__setattr__(field, value)
else:
# here we first validate with pydantic
# Then we apply the value to the remote dict,
# and we change back the __dict__ value to the remote dict
dict_ref = self.__dict__
super().__setattr__(field, value)
for key, val in self.__dict__.items():
dict_ref[key] = val
object.__setattr__(self, '__dict__', dict_ref)
|
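# A small sketch exercising the BaseDoc helpers shown above (the generated id,
# is_view, and summary); it assumes BaseDoc is re-exported at the package root,
# and the MyDoc schema is illustrative.
from typing import Optional

from docarray import BaseDoc


class MyDoc(BaseDoc):
    text: Optional[str] = None


doc = MyDoc(text='hello')
print(doc.id)         # auto-generated hex id from the default_factory
print(doc.is_view())  # False: __dict__ is a plain dict, not a ColumnStorageView
doc.summary()         # rich-printed overview of the non-empty fields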
import pytest
from sklearn.base import (
BaseEstimator,
RegressorMixin,
TransformerMixin,
)
from sklearn.utils._tags import get_tags
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
class EmptyTransformer(TransformerMixin, BaseEstimator):
pass
class EmptyRegressor(RegressorMixin, BaseEstimator):
pass
@pytest.mark.filterwarnings("ignore:.*no __sklearn_tags__ attribute.*:FutureWarning")
@pytest.mark.parametrize(
"estimator, value",
[
[NoTagsEstimator(), False],
[ClassifierEstimator(), True],
[EmptyTransformer(), False],
[EmptyRegressor(), True],
[BaseEstimator(), False],
],
)
def test_requires_y(estimator, value):
assert get_tags(estimator).target_tags.required == value
|
import pytest
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from sklearn.utils._tags import get_tags
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
@pytest.mark.parametrize(
"estimator, value",
[
[NoTagsEstimator(), False],
[ClassifierEstimator(), True],
[TransformerMixin(), False],
[RegressorMixin(), True],
[BaseEstimator(), False],
],
)
def test_requires_y(estimator, value):
assert get_tags(estimator).target_tags.required == value
|
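# A tiny sketch of the same tag lookup on a user-defined estimator: inheriting
# from RegressorMixin is what makes target_tags.required come back True in the
# parametrized tests above. The estimator itself is illustrative.
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils._tags import get_tags


class TinyRegressor(RegressorMixin, BaseEstimator):
    def fit(self, X, y):
        return self

    def predict(self, X):
        return [0.0 for _ in range(len(X))]


print(get_tags(TinyRegressor()).target_tags.required)  # True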
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseCoSENTLoss(model), document_regularizer_weight=5e-5, use_document_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseCoSENTLoss(model), corpus_regularizer_weight=5e-5, use_corpus_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal["jina"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
def JinaCredentialsField() -> JinaCredentialsInput:
"""
Creates a Jina credentials input on a block.
"""
return CredentialsField(
provider="jina",
supported_credential_types={"api_key"},
description="The Jina integration can be used with an API Key.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
|
from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal["jina"],
Literal["api_key"],
]
def JinaCredentialsField() -> JinaCredentialsInput:
"""
Creates a Jina credentials input on a block.
"""
return CredentialsField(
provider="jina",
supported_credential_types={"api_key"},
description="The Jina integration can be used with an API Key.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
|
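# A hedged sketch of how the helper above is typically wired into a block's
# input schema, following the XMLParserBlock pattern earlier in this dump. The
# JinaSearchInput schema and its query field are hypothetical, and the snippet
# is assumed to live in the same module as JinaCredentialsField and
# JinaCredentialsInput.
from backend.data.block import BlockSchema
from backend.data.model import SchemaField


class JinaSearchInput(BlockSchema):
    credentials: JinaCredentialsInput = JinaCredentialsField()
    query: str = SchemaField(description="Search query to send to the Jina API")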
"""Test base tool child implementations."""
import inspect
import re
from typing import List, Type
import pytest
from langchain_core.tools import BaseTool
from langchain_community.tools.amadeus.base import AmadeusBaseTool
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.slack.base import SlackBaseTool
def get_non_abstract_subclasses(cls: Type[BaseTool]) -> List[Type[BaseTool]]:
to_skip = {
AmadeusBaseTool,
BaseBrowserTool,
GmailBaseTool,
O365BaseTool,
SlackBaseTool,
} # Abstract but not recognized
subclasses = []
for subclass in cls.__subclasses__():
if (
not getattr(subclass, "__abstract__", None)
and not subclass.__name__.startswith("_")
and subclass not in to_skip
):
subclasses.append(subclass)
sc = get_non_abstract_subclasses(subclass)
subclasses.extend(sc)
return subclasses
@pytest.mark.parametrize("cls", get_non_abstract_subclasses(BaseTool)) # type: ignore[type-abstract]
def test_all_subclasses_accept_run_manager(cls: Type[BaseTool]) -> None:
"""Test that tools defined in this repo accept a run manager argument."""
# This wouldn't be necessary if the BaseTool had a strict API.
if cls._run is not BaseTool._run:
run_func = cls._run
params = inspect.signature(run_func).parameters
assert "run_manager" in params
pattern = re.compile(r"(?!Async)CallbackManagerForToolRun")
assert bool(re.search(pattern, str(params["run_manager"].annotation)))
assert params["run_manager"].default is None
if cls._arun is not BaseTool._arun:
run_func = cls._arun
params = inspect.signature(run_func).parameters
assert "run_manager" in params
assert "AsyncCallbackManagerForToolRun" in str(params["run_manager"].annotation)
assert params["run_manager"].default is None
|
"""Test base tool child implementations."""
import inspect
import re
from typing import List, Type
import pytest
from langchain_core.tools import BaseTool
from langchain_community.tools.amadeus.base import AmadeusBaseTool
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.slack.base import SlackBaseTool
def get_non_abstract_subclasses(cls: Type[BaseTool]) -> List[Type[BaseTool]]:
to_skip = {
AmadeusBaseTool,
BaseBrowserTool,
GmailBaseTool,
O365BaseTool,
SlackBaseTool,
} # Abstract but not recognized
subclasses = []
for subclass in cls.__subclasses__():
if (
not getattr(subclass, "__abstract__", None)
and not subclass.__name__.startswith("_")
and subclass not in to_skip
):
subclasses.append(subclass)
sc = get_non_abstract_subclasses(subclass)
subclasses.extend(sc)
return subclasses
@pytest.mark.parametrize("cls", get_non_abstract_subclasses(BaseTool)) # type: ignore
def test_all_subclasses_accept_run_manager(cls: Type[BaseTool]) -> None:
"""Test that tools defined in this repo accept a run manager argument."""
# This wouldn't be necessary if the BaseTool had a strict API.
if cls._run is not BaseTool._run:
run_func = cls._run
params = inspect.signature(run_func).parameters
assert "run_manager" in params
pattern = re.compile(r"(?!Async)CallbackManagerForToolRun")
assert bool(re.search(pattern, str(params["run_manager"].annotation)))
assert params["run_manager"].default is None
if cls._arun is not BaseTool._arun:
run_func = cls._arun
params = inspect.signature(run_func).parameters
assert "run_manager" in params
assert "AsyncCallbackManagerForToolRun" in str(params["run_manager"].annotation)
assert params["run_manager"].default is None
|
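# A minimal sketch of a tool whose _run signature satisfies the check in the
# tests above: run_manager is accepted, annotated with the sync callback
# manager type, and defaults to None. The tool itself is illustrative.
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool


class EchoTool(BaseTool):
    name: str = "echo"
    description: str = "Echo the input back to the caller."

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        return query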
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResNeXt Bottleneck forward with plugins
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]
block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
|
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResNeXt Bottleneck forward with plugins
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]
block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
|
"""Test HyDE."""
from typing import Any, Optional
import numpy as np
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.hyde.prompts import PROMPT_MAP
class FakeEmbeddings(Embeddings):
"""Fake embedding class for tests."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return random floats."""
return [list(np.random.uniform(0, 1, 10)) for _ in range(10)]
def embed_query(self, text: str) -> list[float]:
"""Return random floats."""
return list(np.random.uniform(0, 1, 10))
class FakeLLM(BaseLLM):
"""Fake LLM wrapper for testing purposes."""
n: int = 1
def _generate(
self,
prompts: list[str],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
async def _agenerate(
self,
prompts: list[str],
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def test_hyde_from_llm() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(), FakeEmbeddings(), key
)
embedding.embed_query("foo")
def test_hyde_from_llm_with_multiple_n() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(n=8), FakeEmbeddings(), key
)
embedding.embed_query("foo")
|
"""Test HyDE."""
from typing import Any, List, Optional
import numpy as np
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.hyde.prompts import PROMPT_MAP
class FakeEmbeddings(Embeddings):
"""Fake embedding class for tests."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return random floats."""
return [list(np.random.uniform(0, 1, 10)) for _ in range(10)]
def embed_query(self, text: str) -> List[float]:
"""Return random floats."""
return list(np.random.uniform(0, 1, 10))
class FakeLLM(BaseLLM):
"""Fake LLM wrapper for testing purposes."""
n: int = 1
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def test_hyde_from_llm() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(), FakeEmbeddings(), key
)
embedding.embed_query("foo")
def test_hyde_from_llm_with_multiple_n() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(n=8), FakeEmbeddings(), key
)
embedding.embed_query("foo")
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
GGUF_FILE_EXTENSION = "gguf"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
DIFFUSERS_REQUEST_TIMEOUT = 60
# Below should be `True` if the current version of `peft` and `transformers` are compatible with
# the PEFT backend. The PEFT backend is used automatically when compatible versions of both
# libraries are available.
# For PEFT this means a version greater than or equal to 0.6.0, and for transformers a version greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
DECODE_ENDPOINT_SD_V1 = "https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_SD_XL = "https://x2dmsqunjd6k9prw.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_FLUX = "https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_HUNYUAN_VIDEO = "https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_SD_V1 = "https://qc6479g0aac6qwy9.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_SD_XL = "https://xjqqhmyn62rog84g.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_FLUX = "https://ptccx55jz97f9zgo.us-east-1.aws.endpoints.huggingface.cloud/"
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
GGUF_FILE_EXTENSION = "gguf"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current version of `peft` and `transformers` are compatible with
# the PEFT backend. The PEFT backend is used automatically when compatible versions of both
# libraries are available.
# For PEFT this means a version greater than or equal to 0.6.0, and for transformers a version greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
DECODE_ENDPOINT_SD_V1 = "https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_SD_XL = "https://x2dmsqunjd6k9prw.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_FLUX = "https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_HUNYUAN_VIDEO = "https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_SD_V1 = "https://qc6479g0aac6qwy9.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_SD_XL = "https://xjqqhmyn62rog84g.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_FLUX = "https://ptccx55jz97f9zgo.us-east-1.aws.endpoints.huggingface.cloud/"
|
from .database import DatabaseManager, DatabaseManagerClient
from .manager import ExecutionManager
from .scheduler import Scheduler
__all__ = [
"DatabaseManager",
"DatabaseManagerClient",
"ExecutionManager",
"Scheduler",
]
|
from .database import DatabaseManager
from .manager import ExecutionManager
from .scheduler import Scheduler
__all__ = [
"DatabaseManager",
"ExecutionManager",
"Scheduler",
]
|
from typing import Dict, List, Optional, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_integer_field():
class Meow(BaseDocument):
age: int
wealth: float
registered: bool
d = Meow(age=30, wealth=100.5, registered=True)
rebuilt_doc = Meow.from_protobuf(d.to_protobuf())
assert rebuilt_doc.age == 30
assert rebuilt_doc.wealth == 100.5
assert rebuilt_doc.registered
@pytest.mark.proto
def test_list_set_dict_tuple_field():
class MyDoc(BaseDocument):
list_: List
dict_: Dict
tuple_: Tuple
set_: Set
d = MyDoc(
list_=[0, 1, 2], dict_={'a': 0, 'b': 1}, tuple_=tuple([0, 1]), set_={0, 1}
)
rebuilt_doc = MyDoc.from_protobuf(d.to_protobuf())
assert rebuilt_doc.list_ == [0, 1, 2]
assert rebuilt_doc.dict_ == {'a': 0, 'b': 1}
assert rebuilt_doc.tuple_ == (0, 1)
assert rebuilt_doc.set_ == {0, 1}
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
np.uint,
np.uint8,
np.uint64,
int,
np.int8,
np.int64,
float,
np.float16,
np.longfloat,
np.double,
],
)
def test_ndarray_dtype(dtype):
class MyDoc(BaseDocument):
tensor: NdArray
doc = MyDoc(tensor=np.ndarray([1, 2, 3], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
torch.uint8,
torch.int,
torch.int8,
torch.int64,
torch.float,
torch.float64,
torch.double,
],
)
def test_torch_dtype(dtype):
class MyDoc(BaseDocument):
tensor: TorchTensor
doc = MyDoc(tensor=torch.zeros([5, 5], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
|
from typing import Dict, List, Optional, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_integer_field():
class Meow(BaseDocument):
age: int
wealth: float
registered: bool
d = Meow(age=30, wealth=100.5, registered=True)
rebuilt_doc = Meow.from_protobuf(d.to_protobuf())
assert rebuilt_doc.age == 30
assert rebuilt_doc.wealth == 100.5
assert rebuilt_doc.registered
@pytest.mark.proto
def test_list_set_dict_tuple_field():
class MyDoc(BaseDocument):
list_: List
dict_: Dict
tuple_: Tuple
set_: Set
d = MyDoc(
list_=[0, 1, 2], dict_={'a': 0, 'b': 1}, tuple_=tuple([0, 1]), set_={0, 1}
)
rebuilt_doc = MyDoc.from_protobuf(d.to_protobuf())
assert rebuilt_doc.list_ == [0, 1, 2]
assert rebuilt_doc.dict_ == {'a': 0, 'b': 1}
assert rebuilt_doc.tuple_ == (0, 1)
assert rebuilt_doc.set_ == {0, 1}
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
np.uint,
np.uint8,
np.uint64,
int,
np.int8,
np.int64,
float,
np.float16,
np.float128,
np.double,
],
)
def test_ndarray_dtype(dtype):
class MyDoc(BaseDocument):
tensor: NdArray
doc = MyDoc(tensor=np.ndarray([1, 2, 3], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
torch.uint8,
torch.int,
torch.int8,
torch.int64,
torch.float,
torch.float64,
torch.double,
],
)
def test_torch_dtype(dtype):
class MyDoc(BaseDocument):
tensor: TorchTensor
doc = MyDoc(tensor=torch.zeros([5, 5], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
|
# flake8: noqa
JIRA_ISSUE_CREATE_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira issue_create API, useful when you need to create a Jira issue.
The input to this tool is a dictionary specifying the fields of the Jira issue, and will be passed into atlassian-python-api's Jira `issue_create` function.
For example, to create a low priority task called "test issue" with description "test description", you would pass in the following dictionary:
{{"summary": "test issue", "description": "test description", "issuetype": {{"name": "Task"}}, "priority": {{"name": "Low"}}}}
"""
JIRA_GET_ALL_PROJECTS_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira project API,
useful when you need to fetch all the projects the user has access to, find out how many projects there are, or as an intermediary step that involves searching by projects.
There is no input to this tool.
"""
JIRA_JQL_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira jql API, useful when you need to search for Jira issues.
The input to this tool is a JQL query string, and will be passed into atlassian-python-api's Jira `jql` function,
For example, to find all the issues in project "Test" assigned to me, you would pass in the following string:
project = Test AND assignee = currentUser()
or to find issues with summaries that contain the word "test", you would pass in the following string:
summary ~ 'test'
"""
JIRA_CATCH_ALL_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira API.
There are other dedicated tools for fetching all projects, and creating and searching for issues,
use this tool if you need to perform any other actions allowed by the atlassian-python-api Jira API.
The input to this tool is a dictionary specifying a function from atlassian-python-api's Jira API,
as well as a list of arguments and dictionary of keyword arguments to pass into the function.
For example, to get all the users in a group, while increasing the max number of results to 100, you would
pass in the following dictionary: {{"function": "get_all_users_from_group", "args": ["group"], "kwargs": {{"limit":100}} }}
or to find out how many projects are in the Jira instance, you would pass in the following dictionary:
{{"function": "projects"}}
For more information on the Jira API, refer to https://atlassian-python-api.readthedocs.io/jira.html
"""
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT = """This tool is a wrapper around atlassian-python-api's Confluence
API, useful when you need to create a Confluence page. The input to this tool is a dictionary
specifying the fields of the Confluence page, and will be passed into atlassian-python-api's Confluence `create_page`
function. For example, to create a page in the DEMO space titled "This is the title" with body "This is the body. You can use
<strong>HTML tags</strong>!", you would pass in the following dictionary: {{"space": "DEMO", "title":"This is the
title","body":"This is the body. You can use <strong>HTML tags</strong>!"}} """
|
# flake8: noqa
JIRA_ISSUE_CREATE_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira issue_create API, useful when you need to create a Jira issue.
The input to this tool is a dictionary specifying the fields of the Jira issue, and will be passed into atlassian-python-api's Jira `issue_create` function.
For example, to create a low priority task called "test issue" with description "test description", you would pass in the following dictionary:
{{"summary": "test issue", "description": "test description", "issuetype": {{"name": "Task"}}, "priority": {{"name": "Low"}}}}
"""
JIRA_GET_ALL_PROJECTS_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira project API,
useful when you need to fetch all the projects the user has access to, find out how many projects there are, or as an intermediary step that involves searching by projects.
There is no input to this tool.
"""
JIRA_JQL_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira jql API, useful when you need to search for Jira issues.
The input to this tool is a JQL query string, and will be passed into atlassian-python-api's Jira `jql` function,
For example, to find all the issues in project "Test" assigned to me, you would pass in the following string:
project = Test AND assignee = currentUser()
or to find issues with summaries that contain the word "test", you would pass in the following string:
summary ~ 'test'
"""
JIRA_CATCH_ALL_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira API.
There are other dedicated tools for fetching all projects, and creating and searching for issues,
use this tool if you need to perform any other actions allowed by the atlassian-python-api Jira API.
The input to this tool is a dictionary specifying a function from atlassian-python-api's Jira API,
as well as a list of arguments and dictionary of keyword arguments to pass into the function.
For example, to get all the users in a group, while increasing the max number of results to 100, you would
pass in the following dictionary: {{"function": "get_all_users_from_group", "args": ["group"], "kwargs": {{"limit":100}} }}
or to find out how many projects are in the Jira instance, you would pass in the following dictionary:
{{"function": "projects"}}
For more information on the Jira API, refer to https://atlassian-python-api.readthedocs.io/jira.html
"""
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT = """This tool is a wrapper around atlassian-python-api's Confluence
API, useful when you need to create a Confluence page. The input to this tool is a dictionary
specifying the fields of the Confluence page, and will be passed into atlassian-python-api's Confluence `create_page`
function. For example, to create a page in the DEMO space titled "This is the title" with body "This is the body. You can use
<strong>HTML tags</strong>!", you would pass in the following dictionary: {{"space": "DEMO", "title":"This is the
title","body":"This is the body. You can use <strong>HTML tags</strong>!"}} """
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Default: 'sum'.
pre_cfg (dict | None): Specify pre-processing modules. Default: None.
post_cfg (dict | None): Specify post-processing modules. Default: None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation='sum',
pre_cfg=None,
post_cfg=None,
**kwargs):
super(GenericRoIExtractor, self).__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
        # sometimes rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
            # apply post-processing before returning the result
roi_feats = self.post_module(roi_feats)
return roi_feats
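# --- Hedged config sketch (added for illustration; not part of the original module) ---
# GenericRoIExtractor is normally built from a config dict. The layer types and channel
# numbers below are placeholders showing how `aggregation`, `pre_cfg` and `post_cfg`
# fit together with the arguments inherited from BaseRoIExtractor.
_EXAMPLE_GROIE_CFG = dict(
    type='GenericRoIExtractor',
    aggregation='sum',  # or 'concat'; with 'concat' the per-level channels must add up to out_channels
    roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
    out_channels=256,
    featmap_strides=[4, 8, 16, 32],
    # optional module applied to each per-level RoI feature before aggregation
    pre_cfg=dict(
        type='ConvModule',
        in_channels=256,
        out_channels=256,
        kernel_size=5,
        padding=2,
        inplace=False),
    # optional module applied once to the aggregated RoI feature
    post_cfg=dict(
        type='GeneralizedAttention',
        in_channels=256,
        spatial_range=-1,
        num_heads=6,
        attention_type='0100',
        kv_stride=2))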
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Default: 'sum'.
pre_cfg (dict | None): Specify pre-processing modules. Default: None.
post_cfg (dict | None): Specify post-processing modules. Default: None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation='sum',
pre_cfg=None,
post_cfg=None,
**kwargs):
super(GenericRoIExtractor, self).__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
        # sometimes rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
            # apply post-processing before returning the result
roi_feats = self.post_module(roi_feats)
return roi_feats
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
class MySUperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray, Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
class MySUperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
from __future__ import annotations
import logging
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents.agent import AgentOutputParser
logger = logging.getLogger(__name__)
class JSONAgentOutputParser(AgentOutputParser):
"""Parses tool invocations and final answers in JSON format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
{
"action": "search",
"action_input": "2+2"
}
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
{
"action": "Final Answer",
"action_input": "4"
}
```
"""
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
response = parse_json_markdown(text)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
else:
action_input = response.get("action_input", {})
if action_input is None:
action_input = {}
return AgentAction(response["action"], action_input, text)
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@property
def _type(self) -> str:
return "json-agent"
|
from __future__ import annotations
import logging
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.json import parse_json_markdown
from langchain.agents.agent import AgentOutputParser
logger = logging.getLogger(__name__)
class JSONAgentOutputParser(AgentOutputParser):
"""Parses tool invocations and final answers in JSON format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
{
"action": "search",
"action_input": "2+2"
}
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
{
"action": "Final Answer",
"action_input": "4"
}
```
"""
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
response = parse_json_markdown(text)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
else:
action_input = response.get("action_input", {})
if action_input is None:
action_input = {}
return AgentAction(response["action"], action_input, text)
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@property
def _type(self) -> str:
return "json-agent"
|
from typing import Any
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
    return False  # not a numpy scalar: item has no dtype/ndim attributes
def is_tf_available():
return tf_imported
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):
"""Tests ssd head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_head = SSDHead(
num_classes=4,
in_channels=(1, 1, 1, 1, 1, 1),
stacked_convs=1,
feat_channels=1,
use_depthwise=True,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=s,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
train_cfg=cfg)
        # SSD head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in ssd_head.prior_generator.strides)
cls_scores, bbox_preds = ssd_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
# When there is no truth, cls_loss and box_loss should all be zero.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):
"""Tests anchor head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_head = SSDHead(
num_classes=4,
in_channels=(1, 1, 1, 1, 1, 1),
stacked_convs=1,
feat_channels=1,
use_depthwise=True,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=s,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
train_cfg=cfg)
        # Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in ssd_head.prior_generator.strides)
cls_scores, bbox_preds = ssd_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
# When there is no truth, cls_loss and box_loss should all be zero.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import MLMTransformer, SparseStaticEmbedding, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
SparseStaticEmbedding.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
from .torch_object_detection_segmenter import TorchObjectDetectionSegmenter
|
from .torch_object_detection_segmenter import TorchObjectDetectionSegmenter
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
Given a list of (anchor, positive) pairs, this loss sums the following two losses:
1. Forward loss: Given an anchor, find the sample with the highest similarity out of all positives in the batch.
This is equivalent to :class:`MultipleNegativesRankingLoss`.
2. Backward loss: Given a positive, find the sample with the highest similarity out of all anchors in the batch.
For example with question-answer pairs, :class:`MultipleNegativesRankingLoss` just computes the loss to find
the answer given a question, but :class:`MultipleNegativesSymmetricRankingLoss` additionally computes the
loss to find the question given an answer.
        Note: If you pass triplets, the negative entry will be ignored. An anchor is just searched for the positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
- :class:`CachedMultipleNegativesSymmetricRankingLoss` is equivalent to this loss, but it uses caching that
allows for much higher batch sizes (and thus better performance) without extra memory usage. However, it
is slightly slower.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
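        # The first len(reps[1]) columns of `scores` hold the anchor-vs-positive
        # similarities. The forward loss matches each anchor to its positive among all
        # candidates, while the backward loss transposes that square block so that each
        # positive is matched back to its anchor.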
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
Given a list of (anchor, positive) pairs, this loss sums the following two losses:
1. Forward loss: Given an anchor, find the sample with the highest similarity out of all positives in the batch.
This is equivalent to :class:`MultipleNegativesRankingLoss`.
2. Backward loss: Given a positive, find the sample with the highest similarity out of all anchors in the batch.
For example with question-answer pairs, :class:`MultipleNegativesRankingLoss` just computes the loss to find
the answer given a question, but :class:`MultipleNegativesSymmetricRankingLoss` additionally computes the
loss to find the question given an answer.
        Note: If you pass triplets, the negative entry will be ignored. An anchor is just searched for the positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
- :class:`CachedMultipleNegativesSymmetricRankingLoss` is equivalent to this loss, but it uses caching that
allows for much higher batch sizes (and thus better performance) without extra memory usage. However, it
is slightly slower.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
|
import pytest
from llama_index.readers.github import GithubRepositoryReader
class MockGithubClient:
pass
@pytest.fixture()
def github_reader():
return GithubRepositoryReader(
github_client=MockGithubClient(), owner="owner", repo="repo"
)
@pytest.mark.parametrize(
("blob_url", "expected_base_url"),
[
("https://github.com/owner/repo/blob/main/file.py", "https://github.com/"),
(
"https://github-enterprise.com/owner/repo/blob/main/file.py",
"https://github-enterprise.com/",
),
(
"https://custom-domain.com/owner/repo/blob/main/file.py",
"https://custom-domain.com/",
),
(
"https://subdomain.github.com/owner/repo/blob/main/file.py",
"https://subdomain.github.com/",
),
(
"https://something.org/owner/repo/blob/main/file.py",
"https://github.com/",
),
("", "https://github.com/"),
],
)
def test_get_base_url(github_reader, blob_url, expected_base_url):
base_url = github_reader._get_base_url(blob_url)
assert base_url == expected_base_url, (
f"Expected {expected_base_url}, but got {base_url}"
)
|
import pytest
from llama_index.readers.github import GithubRepositoryReader
class MockGithubClient:
pass
@pytest.fixture()
def github_reader():
return GithubRepositoryReader(
github_client=MockGithubClient(), owner="owner", repo="repo"
)
@pytest.mark.parametrize(
("blob_url", "expected_base_url"),
[
("https://github.com/owner/repo/blob/main/file.py", "https://github.com/"),
(
"https://github-enterprise.com/owner/repo/blob/main/file.py",
"https://github-enterprise.com/",
),
(
"https://custom-domain.com/owner/repo/blob/main/file.py",
"https://custom-domain.com/",
),
(
"https://subdomain.github.com/owner/repo/blob/main/file.py",
"https://subdomain.github.com/",
),
(
"https://something.org/owner/repo/blob/main/file.py",
"https://github.com/",
),
("", "https://github.com/"),
],
)
def test_get_base_url(github_reader, blob_url, expected_base_url):
base_url = github_reader._get_base_url(blob_url)
assert (
base_url == expected_base_url
), f"Expected {expected_base_url}, but got {base_url}"
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from __future__ import annotations
import math
from pathlib import Path
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
output = static_embedding_model(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding_model: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding_model.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding_model.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
# The shape has been 29528 for <0.5.0, 29525 for 0.5.0, and 29524 for >=0.6.0, so let's make a safer test
# that checks the first dimension is close to 29525 and the second dimension is 32.
assert abs(model.embedding.weight.shape[0] - 29525) < 5
assert model.embedding.weight.shape[1] == 32
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
output = static_embedding_model(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding_model: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding_model.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding_model.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.core.mask import BitmapMasks, PolygonMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
def construct_toy_data(poly2mask):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
results['img'] = img
results['img_shape'] = img.shape[:2]
results['gt_bboxes'] = np.array([[1, 0, 2, 2]], dtype=np.float32)
results['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
if poly2mask:
gt_masks = np.array([[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
else:
raw_masks = [[np.array([1, 2, 1, 0, 2, 1], dtype=np.float32)]]
results['gt_masks'] = PolygonMasks(raw_masks, 3, 4)
    results['gt_ignore_flags'] = np.array([1], dtype=bool)
results['gt_seg_map'] = np.array(
[[255, 13, 255, 255], [255, 13, 13, 255], [255, 13, 255, 255]],
dtype=np.uint8)
return results
def check_result_same(results, pipeline_results, check_keys):
"""Check whether the ``pipeline_results`` is the same with the predefined
``results``.
Args:
results (dict): Predefined results which should be the standard
output of the transform pipeline.
pipeline_results (dict): Results processed by the transform
pipeline.
check_keys (tuple): Keys that need to be checked between
results and pipeline_results.
"""
for key in check_keys:
if results.get(key, None) is None:
continue
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert_allclose(pipeline_results[key].to_ndarray(),
results[key].to_ndarray())
else:
assert_allclose(pipeline_results[key], results[key])
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
|
import os
import time
import pytest
from docarray import Document
from jina import Flow
from jina.constants import __cache_path__
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def filewriter_exec_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'filewriter-exec/'), tag='filewriter-exec'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize(
'source,destination,workspace',
[('test/dir', '/custom/app', '/custom/app')],
)
def test_volumes_in_flow(
tmpdir, source, destination, workspace, filewriter_exec_docker_image_built
):
if source: # test manually set volume and workspace
source = os.path.join(tmpdir, source)
volumes = [str(source) + ':' + destination]
else: # test auto volume and workspace
source = __cache_path__
f = Flow().add(
uses='docker://filewriter-exec', volumes=volumes, workspace=workspace
)
with f:
f.post(inputs=[Document()], on='/foo')
assert os.path.exists(source)
found_output_file = False # workspace has random element, so we search for it
for cur_path, dirs, files in os.walk(source):
if 'out.txt' in files:
with open(os.path.join(cur_path, 'out.txt'), 'r') as f:
if f.read() == 'Filewriter was here':
found_output_file = True
assert found_output_file
|
import os
import time
import pytest
from docarray import Document
from jina import Flow, __cache_path__
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def filewriter_exec_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'filewriter-exec/'), tag='filewriter-exec'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize(
'source,destination,workspace',
[('test/dir', '/custom/app', '/custom/app')],
)
def test_volumes_in_flow(
tmpdir, source, destination, workspace, filewriter_exec_docker_image_built
):
if source: # test manually set volume and workspace
source = os.path.join(tmpdir, source)
volumes = [str(source) + ':' + destination]
else: # test auto volume and workspace
source = __cache_path__
f = Flow().add(
uses='docker://filewriter-exec', volumes=volumes, workspace=workspace
)
with f:
f.post(inputs=[Document()], on='/foo')
assert os.path.exists(source)
found_output_file = False # workspace has random element, so we search for it
for cur_path, dirs, files in os.walk(source):
if 'out.txt' in files:
with open(os.path.join(cur_path, 'out.txt'), 'r') as f:
if f.read() == 'Filewriter was here':
found_output_file = True
assert found_output_file
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_data_sample import DetDataSample
__all__ = ['DetDataSample']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .general_data import GeneralData
from .instance_data import InstanceData
__all__ = ['GeneralData', 'InstanceData']
|
"""Test in memory indexer."""
from collections.abc import AsyncGenerator, Generator
import pytest
from langchain_tests.integration_tests.indexer import (
AsyncDocumentIndexTestSuite,
DocumentIndexerTestSuite,
)
from langchain_core.documents import Document
from langchain_core.indexing.base import DocumentIndex
from langchain_core.indexing.in_memory import (
InMemoryDocumentIndex,
)
class TestDocumentIndexerTestSuite(DocumentIndexerTestSuite):
@pytest.fixture
def index(self) -> Generator[DocumentIndex, None, None]:
yield InMemoryDocumentIndex() # noqa: PT022
class TestAsyncDocumentIndexerTestSuite(AsyncDocumentIndexTestSuite):
# Something funky is going on with mypy and async pytest fixture
@pytest.fixture
async def index(self) -> AsyncGenerator[DocumentIndex, None]: # type: ignore[override]
yield InMemoryDocumentIndex() # noqa: PT022
def test_sync_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
index.upsert(documents)
assert index.invoke("hello") == [documents[0], documents[1]]
assert index.invoke("cat") == [documents[1], documents[0]]
async def test_async_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
await index.aupsert(documents)
assert (await index.ainvoke("hello")) == [documents[0], documents[1]]
assert (await index.ainvoke("cat")) == [documents[1], documents[0]]
|
"""Test in memory indexer."""
from collections.abc import AsyncGenerator, Generator
import pytest
from langchain_tests.integration_tests.indexer import (
AsyncDocumentIndexTestSuite,
DocumentIndexerTestSuite,
)
from langchain_core.documents import Document
from langchain_core.indexing.base import DocumentIndex
from langchain_core.indexing.in_memory import (
InMemoryDocumentIndex,
)
class TestDocumentIndexerTestSuite(DocumentIndexerTestSuite):
@pytest.fixture
def index(self) -> Generator[DocumentIndex, None, None]:
yield InMemoryDocumentIndex() # noqa: PT022
class TestAsyncDocumentIndexerTestSuite(AsyncDocumentIndexTestSuite):
# Something funky is going on with mypy and async pytest fixture
@pytest.fixture
async def index(self) -> AsyncGenerator[DocumentIndex, None]: # type: ignore
yield InMemoryDocumentIndex() # noqa: PT022
def test_sync_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
index.upsert(documents)
assert index.invoke("hello") == [documents[0], documents[1]]
assert index.invoke("cat") == [documents[1], documents[0]]
async def test_async_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
await index.aupsert(documents)
assert (await index.ainvoke("hello")) == [documents[0], documents[1]]
assert (await index.ainvoke("cat")) == [documents[1], documents[0]]
|
"""Llava Completion Pack."""
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.llms.replicate import Replicate
class LlavaCompletionPack(BaseLlamaPack):
"""Llava Completion pack."""
def __init__(
self,
image_url: str,
**kwargs: Any,
) -> None:
"""Init params."""
import os
if not os.environ.get("REPLICATE_API_TOKEN", None):
raise ValueError("Replicate API Token is missing or blank.")
self.image_url = image_url
self.llm = Replicate(
model="yorickvp/llava-13b:2facb4a474a0462c15041b78b1ad70952ea46b5ec6ad29583c0b29dbd4249591",
image=self.image_url,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"image_url": self.image_url,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.llm.complete(*args, **kwargs)
|
"""Llava Completion Pack."""
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.llms.replicate import Replicate
class LlavaCompletionPack(BaseLlamaPack):
"""Llava Completion pack."""
def __init__(
self,
image_url: str,
**kwargs: Any,
) -> None:
"""Init params."""
import os
if not os.environ.get("REPLICATE_API_TOKEN", None):
raise ValueError("Replicate API Token is missing or blank.")
self.image_url = image_url
self.llm = Replicate(
model="yorickvp/llava-13b:2facb4a474a0462c15041b78b1ad70952ea46b5ec6ad29583c0b29dbd4249591",
image=self.image_url,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"image_url": self.image_url,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.llm.complete(*args, **kwargs)
|
"""
==============================
Ordinary Least Squares Example
==============================
This example shows how to use the ordinary least squares (OLS) model
called :class:`~sklearn.linear_model.LinearRegression` in scikit-learn.
For this purpose, we use a single feature from the diabetes dataset and try to
predict the diabetes progression using this linear model. We therefore load the
diabetes dataset and split it into training and test sets.
Then, we fit the model on the training set, evaluate its performance on the test
set, and finally visualize the results.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data Loading and Preparation
# ----------------------------
#
# Load the diabetes dataset. For simplicity, we only keep a single feature in the data.
# Then, we split the data and target into training and test sets.
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
X, y = load_diabetes(return_X_y=True)
X = X[:, [2]] # Use only one feature
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=20, shuffle=False)
# %%
# Linear regression model
# -----------------------
#
# We create a linear regression model and fit it on the training data. Note that by
# default, an intercept is added to the model. We can control this behavior by setting
# the `fit_intercept` parameter.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression().fit(X_train, y_train)
# %%
# Model evaluation
# ----------------
#
# We evaluate the model's performance on the test set using the mean squared error
# and the coefficient of determination.
from sklearn.metrics import mean_squared_error, r2_score
y_pred = regressor.predict(X_test)
print(f"Mean squared error: {mean_squared_error(y_test, y_pred):.2f}")
print(f"Coefficient of determination: {r2_score(y_test, y_pred):.2f}")
# %%
# Plotting the results
# --------------------
#
# Finally, we visualize the results on the train and test data.
import matplotlib.pyplot as plt
fig, ax = plt.subplots(ncols=2, figsize=(10, 5), sharex=True, sharey=True)
ax[0].scatter(X_train, y_train, label="Train data points")
ax[0].plot(
X_train,
regressor.predict(X_train),
linewidth=3,
color="tab:orange",
label="Model predictions",
)
ax[0].set(xlabel="Feature", ylabel="Target", title="Train set")
ax[0].legend()
ax[1].scatter(X_test, y_test, label="Test data points")
ax[1].plot(X_test, y_pred, linewidth=3, color="tab:orange", label="Model predictions")
ax[1].set(xlabel="Feature", ylabel="Target", title="Test set")
ax[1].legend()
fig.suptitle("Linear Regression")
plt.show()
# %%
# Conclusion
# ----------
#
# The trained model corresponds to the estimator that minimizes the mean squared error
# between the predicted and the true target values on the training data. We therefore
# obtain an estimator of the conditional mean of the target given the data.
#
# Note that in higher dimensions, minimizing only the squared error might lead to
# overfitting. Therefore, regularization techniques such as those implemented in
# :class:`~sklearn.linear_model.Ridge` or :class:`~sklearn.linear_model.Lasso` are
# commonly used to prevent this issue, as sketched below.
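# %%
# Regularization sketch
# ---------------------
#
# A minimal sketch, assuming the same single-feature split defined above: fitting
# :class:`~sklearn.linear_model.Ridge` with an arbitrary, untuned ``alpha`` shows how a
# regularized counterpart plugs into the identical fit/predict workflow.
from sklearn.linear_model import Ridge

ridge = Ridge(alpha=1.0).fit(X_train, y_train)  # alpha is illustrative, not tuned
print(f"Ridge mean squared error: {mean_squared_error(y_test, ridge.predict(X_test)):.2f}")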
|
"""
=========================================================
Linear Regression Example
=========================================================
The example below uses only the first feature of the `diabetes` dataset,
in order to illustrate the data points within the two-dimensional plot.
The straight line in the plot shows how linear regression attempts to minimize
the residual sum of squares between the observed responses in the dataset and
the responses predicted by the linear approximation.
The coefficients, residual sum of squares and the coefficient of
determination are also calculated.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# Load the diabetes dataset
diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)
# Use only one feature
diabetes_X = diabetes_X[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes_y[:-20]
diabetes_y_test = diabetes_y[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients
print("Coefficients: \n", regr.coef_)
# The mean squared error
print("Mean squared error: %.2f" % mean_squared_error(diabetes_y_test, diabetes_y_pred))
# The coefficient of determination: 1 is perfect prediction
print("Coefficient of determination: %.2f" % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color="black")
plt.plot(diabetes_X_test, diabetes_y_pred, color="blue", linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
from typing import Optional
import numpy as np
from docarray import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: Tensor
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
from typing import Optional
import numpy as np
from docarray import DocumentArray
from docarray.document import BaseDocument
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: np.ndarray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: np.ndarray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: np.ndarray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from typing import Dict, List
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].hello == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
@pytest.mark.proto
def test_union_type_error():
from typing import Union
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_protobuf(docs.to_protobuf())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_protobuf(docs_basic.to_protobuf())
assert docs_copy == docs_basic
class MySimpleDoc(BaseDoc):
title: str
class MyComplexDoc(BaseDoc):
content_dict_doclist: Dict[str, DocList[MySimpleDoc]]
content_dict_list: Dict[str, List[MySimpleDoc]]
aux_dict: Dict[str, int]
def test_to_from_proto_complex():
da = DocList[MyComplexDoc](
[
MyComplexDoc(
content_dict_doclist={
'test1': DocList[MySimpleDoc](
[MySimpleDoc(title='123'), MySimpleDoc(title='456')]
)
},
content_dict_list={
'test1': [MySimpleDoc(title='123'), MySimpleDoc(title='456')]
},
aux_dict={'a': 0},
)
]
)
da2 = DocList[MyComplexDoc].from_protobuf(da.to_protobuf())
assert len(da2) == 1
d2 = da2[0]
assert d2.aux_dict == {'a': 0}
assert len(d2.content_dict_doclist['test1']) == 2
assert d2.content_dict_doclist['test1'][0].title == '123'
assert d2.content_dict_doclist['test1'][1].title == '456'
assert len(d2.content_dict_list['test1']) == 2
assert d2.content_dict_list['test1'][0].title == '123'
assert d2.content_dict_list['test1'][1].title == '456'
|
import numpy as np
import pytest
from typing import Dict, List
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].hello == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
@pytest.mark.proto
def test_union_type_error():
from typing import Union
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_protobuf(docs.to_protobuf())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_protobuf(docs_basic.to_protobuf())
assert docs_copy == docs_basic
class MySimpleDoc(BaseDoc):
title: str
class MyComplexDoc(BaseDoc):
content_dict_doclist: Dict[str, DocList[MySimpleDoc]]
content_dict_list: Dict[str, List[MySimpleDoc]]
aux_dict: Dict[str, int]
def test_to_from_proto_complex():
da = DocList[MyComplexDoc](
[
MyComplexDoc(
content_dict_doclist={
'test1': DocList[MySimpleDoc](
[MySimpleDoc(title='123'), MySimpleDoc(title='456')]
)
},
content_dict_list={
'test1': [MySimpleDoc(title='123'), MySimpleDoc(title='456')]
},
aux_dict={'a': 0},
)
]
)
da2 = DocList[MyComplexDoc].from_protobuf(da.to_protobuf())
assert len(da2) == 1
d2 = da2[0]
assert d2.aux_dict == {'a': 0}
assert len(d2.content_dict_doclist['test1']) == 2
assert d2.content_dict_doclist['test1'][0].title == '123'
assert d2.content_dict_doclist['test1'][1].title == '456'
assert len(d2.content_dict_list['test1']) == 2
assert d2.content_dict_list['test1'][0].title == '123'
assert d2.content_dict_list['test1'][1].title == '456'
|
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
logger = logging.getLogger(__name__)
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
Includes enhanced error handling and process lifecycle management.
"""
try:
# Run all processes except the last one in the background.
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground.
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
try:
process.stop()
except Exception as e:
logger.exception(f"[{process.service_name}] unable to stop: {e}")
def main(**kwargs):
"""
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, Scheduler
from backend.notifications import NotificationManager
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
Scheduler(),
NotificationManager(),
WebsocketServer(),
AgentServer(),
**kwargs,
)
if __name__ == "__main__":
main()
|
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
logger = logging.getLogger(__name__)
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
Includes enhanced error handling and process lifecycle management.
"""
try:
# Run all processes except the last one in the background.
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground.
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
try:
process.stop()
except Exception as e:
logger.exception(f"[{process.service_name}] unable to stop: {e}")
def main(**kwargs):
"""
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.notifications import NotificationManager
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
ExecutionScheduler(),
NotificationManager(),
WebsocketServer(),
AgentServer(),
**kwargs,
)
if __name__ == "__main__":
main()
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("gpu_hist")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
def test_quantile_objective() -> None:
with pytest.raises(ValueError, match="external memory"):
check_quantile_loss_extmem(2, 2, 2, "hist", "cuda")
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("gpu_hist")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
|
from pathlib import Path
from typing import Any, Iterator, List
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.document_loaders.text import TextLoader
def test_raise_error_if_path_not_exist() -> None:
loader = DirectoryLoader("./not_exist_directory")
with pytest.raises(FileNotFoundError) as e:
loader.load()
assert str(e.value) == "Directory not found: './not_exist_directory'"
def test_raise_error_if_path_is_not_directory() -> None:
loader = DirectoryLoader(__file__)
with pytest.raises(ValueError) as e:
loader.load()
assert str(e.value) == f"Expected directory, got file: '{__file__}'"
class CustomLoader(TextLoader):
"""Test loader. Mimics interface of existing file loader."""
def __init__(self, path: Path, **kwargs: Any) -> None:
"""Initialize the loader."""
self.path = path
def load(self) -> List[Document]:
"""Load documents."""
with open(self.path, "r") as f:
return [Document(page_content=f.read())]
def lazy_load(self) -> Iterator[Document]:
raise NotImplementedError("CustomLoader does not implement lazy_load()")
def test_exclude_ignores_matching_files(tmp_path: Path) -> None:
txt_file = tmp_path / "test.txt"
py_file = tmp_path / "test.py"
txt_file.touch()
py_file.touch()
loader = DirectoryLoader(
str(tmp_path),
exclude=["*.py"],
loader_cls=CustomLoader,
)
data = loader.load()
assert len(data) == 1
def test_exclude_as_string_converts_to_sequence() -> None:
loader = DirectoryLoader("./some_directory", exclude="*.py")
assert loader.exclude == ("*.py",)
class CustomLoaderMetadataOnly(CustomLoader):
"""Test loader that just returns the file path in metadata. For test_directory_loader_glob_multiple.""" # noqa: E501
def load(self) -> List[Document]:
metadata = {"source": self.path}
return [Document(page_content="", metadata=metadata)]
def lazy_load(self) -> Iterator[Document]:
return iter(self.load())
def test_directory_loader_glob_multiple() -> None:
"""Verify that globbing multiple patterns in a list works correctly."""
path_to_examples = "tests/examples/"
list_extensions = [".rst", ".txt"]
list_globs = [f"**/*{ext}" for ext in list_extensions]
is_file_type_loaded = {ext: False for ext in list_extensions}
loader = DirectoryLoader(
path=path_to_examples, glob=list_globs, loader_cls=CustomLoaderMetadataOnly
)
list_documents = loader.load()
for doc in list_documents:
path_doc = Path(doc.metadata.get("source", ""))
ext_doc = path_doc.suffix
if is_file_type_loaded.get(ext_doc, False):
continue
elif ext_doc in list_extensions:
is_file_type_loaded[ext_doc] = True
else:
# Loaded a filetype that was not specified in extensions list
assert False
for ext in list_extensions:
assert is_file_type_loaded.get(ext, False)
|
from pathlib import Path
from typing import Any, Iterator, List
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.document_loaders.text import TextLoader
def test_raise_error_if_path_not_exist() -> None:
loader = DirectoryLoader("./not_exist_directory")
with pytest.raises(FileNotFoundError) as e:
loader.load()
assert str(e.value) == "Directory not found: './not_exist_directory'"
def test_raise_error_if_path_is_not_directory() -> None:
loader = DirectoryLoader(__file__)
with pytest.raises(ValueError) as e:
loader.load()
assert str(e.value) == f"Expected directory, got file: '{__file__}'"
class CustomLoader(TextLoader):
"""Test loader. Mimics interface of existing file loader."""
def __init__(self, path: Path, **kwargs: Any) -> None:
"""Initialize the loader."""
self.path = path
def load(self) -> List[Document]:
"""Load documents."""
with open(self.path, "r") as f:
return [Document(page_content=f.read())]
def lazy_load(self) -> Iterator[Document]:
raise NotImplementedError("CustomLoader does not implement lazy_load()")
def test_exclude_ignores_matching_files(tmp_path: Path) -> None:
txt_file = tmp_path / "test.txt"
py_file = tmp_path / "test.py"
txt_file.touch()
py_file.touch()
loader = DirectoryLoader(
str(tmp_path),
exclude=["*.py"],
loader_cls=CustomLoader, # type: ignore
)
data = loader.load()
assert len(data) == 1
def test_exclude_as_string_converts_to_sequence() -> None:
loader = DirectoryLoader("./some_directory", exclude="*.py")
assert loader.exclude == ("*.py",)
class CustomLoaderMetadataOnly(CustomLoader):
"""Test loader that just returns the file path in metadata. For test_directory_loader_glob_multiple.""" # noqa: E501
def load(self) -> List[Document]:
metadata = {"source": self.path}
return [Document(page_content="", metadata=metadata)]
def lazy_load(self) -> Iterator[Document]:
return iter(self.load())
def test_directory_loader_glob_multiple() -> None:
"""Verify that globbing multiple patterns in a list works correctly."""
path_to_examples = "tests/examples/"
list_extensions = [".rst", ".txt"]
list_globs = [f"**/*{ext}" for ext in list_extensions]
is_file_type_loaded = {ext: False for ext in list_extensions}
loader = DirectoryLoader(
path=path_to_examples, glob=list_globs, loader_cls=CustomLoaderMetadataOnly
)
list_documents = loader.load()
for doc in list_documents:
path_doc = Path(doc.metadata.get("source", ""))
ext_doc = path_doc.suffix
if is_file_type_loaded.get(ext_doc, False):
continue
elif ext_doc in list_extensions:
is_file_type_loaded[ext_doc] = True
else:
# Loaded a filetype that was not specified in extensions list
assert False
for ext in list_extensions:
assert is_file_type_loaded.get(ext, False)
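# A minimal sketch of the multi-glob + exclude usage exercised by the tests
# above; the "docs/" directory name is an assumption, not part of the tests.
def _example_multi_glob_usage() -> None:
    loader = DirectoryLoader(
        "docs/",
        glob=["**/*.txt", "**/*.rst"],  # several patterns, as in test_directory_loader_glob_multiple
        exclude=["*.py"],  # exclusion accepts a single pattern or a sequence
        loader_cls=TextLoader,
    )
    docs = loader.load()
    print(len(docs))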
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
|
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
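# A minimal config sketch showing how this shared head is typically referenced
# through the SHARED_HEADS registry; the depth/stage values below are
# illustrative assumptions, not taken from a specific model config.
example_shared_head = dict(
    type='ResLayer',
    depth=50,
    stage=3,
    stride=2,
    norm_cfg=dict(type='BN', requires_grad=True),
    norm_eval=True)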
|
import copy
import os.path as osp
import unittest
from mmcv.transforms import Compose
from mmdet.datasets.transforms import MultiBranch
from mmdet.utils import register_all_modules
register_all_modules()
class TestMultiBranch(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
seg_map = osp.join(data_prefix, 'gray.jpg')
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction',
'homography_matrix')
self.results = {
'img_path':
img_path,
'img_id':
12345,
'img_shape': (300, 400),
'seg_map_path':
seg_map,
'instances': [{
'bbox': [0, 0, 10, 20],
'bbox_label': 1,
'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
'ignore_flag': 0
}, {
'bbox': [10, 10, 110, 120],
'bbox_label': 2,
'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
'ignore_flag': 0
}, {
'bbox': [50, 50, 60, 80],
'bbox_label': 2,
'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],
'ignore_flag': 1
}]
}
self.weak_pipeline = [
dict(type='ShearX'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.strong_pipeline = [
dict(type='ShearX'),
dict(type='ShearY'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.labeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
sup_teacher=self.weak_pipeline,
sup_student=self.strong_pipeline),
]
self.unlabeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
unsup_teacher=self.weak_pipeline,
unsup_student=self.strong_pipeline),
]
def test_transform(self):
labeled_pipeline = Compose(self.labeled_pipeline)
labeled_results = labeled_pipeline(copy.deepcopy(self.results))
unlabeled_pipeline = Compose(self.unlabeled_pipeline)
unlabeled_results = unlabeled_pipeline(copy.deepcopy(self.results))
# test branch sup_teacher and sup_student
sup_branches = ['sup_teacher', 'sup_student']
for branch in sup_branches:
self.assertIn(branch, labeled_results)
self.assertIn('homography_matrix',
labeled_results[branch]['data_sample'])
self.assertIn('labels',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('bboxes',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('masks',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('gt_sem_seg', labeled_results[branch]['data_sample'])
# test branch unsup_teacher and unsup_student
unsup_branches = ['unsup_teacher', 'unsup_student']
for branch in unsup_branches:
self.assertIn(branch, unlabeled_results)
self.assertIn('homography_matrix',
unlabeled_results[branch]['data_sample'])
self.assertNotIn(
'labels',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'bboxes',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'masks', unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn('gt_sem_seg',
unlabeled_results[branch]['data_sample'])
def test_repr(self):
pipeline = [dict(type='PackDetInputs', meta_keys=())]
transform = MultiBranch(sup=pipeline, unsup=pipeline)
self.assertEqual(
repr(transform),
("MultiBranch(branch_pipelines=['sup', 'unsup'])"))
|
import copy
import os.path as osp
import unittest
from mmcv.transforms import Compose
from mmdet.datasets.transforms import MultiBranch
from mmdet.utils import register_all_modules
register_all_modules()
class TestMultiBranch(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
seg_map = osp.join(data_prefix, 'gray.jpg')
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction',
'homography_matrix')
self.results = {
'img_path':
img_path,
'img_id':
12345,
'img_shape': (300, 400),
'seg_map_path':
seg_map,
'instances': [{
'bbox': [0, 0, 10, 20],
'bbox_label': 1,
'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
'ignore_flag': 0
}, {
'bbox': [10, 10, 110, 120],
'bbox_label': 2,
'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
'ignore_flag': 0
}, {
'bbox': [50, 50, 60, 80],
'bbox_label': 2,
'ignore_flag': 1
}]
}
self.weak_pipeline = [
dict(type='ShearX'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.strong_pipeline = [
dict(type='ShearX'),
dict(type='ShearY'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.labeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
sup_teacher=self.weak_pipeline,
sup_student=self.strong_pipeline),
]
self.unlabeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
unsup_teacher=self.weak_pipeline,
unsup_student=self.strong_pipeline),
]
def test_transform(self):
labeled_pipeline = Compose(self.labeled_pipeline)
labeled_results = labeled_pipeline(copy.deepcopy(self.results))
unlabeled_pipeline = Compose(self.unlabeled_pipeline)
unlabeled_results = unlabeled_pipeline(copy.deepcopy(self.results))
# test branch sup_teacher and sup_student
sup_branches = ['sup_teacher', 'sup_student']
for branch in sup_branches:
self.assertIn(branch, labeled_results)
self.assertIn('homography_matrix',
labeled_results[branch]['data_sample'])
self.assertIn('labels',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('bboxes',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('masks',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('gt_sem_seg', labeled_results[branch]['data_sample'])
# test branch unsup_teacher and unsup_student
unsup_branches = ['unsup_teacher', 'unsup_student']
for branch in unsup_branches:
self.assertIn(branch, unlabeled_results)
self.assertIn('homography_matrix',
unlabeled_results[branch]['data_sample'])
self.assertNotIn(
'labels',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'bboxes',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'masks', unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn('gt_sem_seg',
unlabeled_results[branch]['data_sample'])
def test_repr(self):
pipeline = [dict(type='PackDetInputs', meta_keys=())]
transform = MultiBranch(sup=pipeline, unsup=pipeline)
self.assertEqual(
repr(transform),
("MultiBranch(branch_pipelines=['sup', 'unsup'])"))
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='ImageDoc')
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain an ImageUrl (`ImageDoc.url`), an AnyTensor (`ImageDoc.tensor`),
and an AnyEmbedding (`ImageDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(url="http://www.jina.ai/image.jpg"),
text=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch is not None and isinstance(value, torch.Tensor))
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
value = cls(bytes_=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
T = TypeVar('T', bound='ImageDoc')
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain an ImageUrl (`ImageDoc.url`), an AnyTensor (`ImageDoc.tensor`),
and an AnyEmbedding (`ImageDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(url="http://www.jina.ai/image.jpg"),
text=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch_available and isinstance(value, torch.Tensor))
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
value = cls(bytes_=value)
return super().validate(value)
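# A minimal sketch of the coercions handled by `validate` above; the URL is the
# same placeholder used in the docstring.
def _example_image_doc_coercion() -> None:
    doc_from_url = ImageDoc.validate('http://www.jina.ai/image.jpg')
    doc_from_tensor = ImageDoc.validate(np.zeros((3, 224, 224)))
    assert doc_from_url.url is not None
    assert doc_from_tensor.tensor is not None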
|
# TODO: deprecate
agent_instructions = """You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. \
You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Question: {question}""" # noqa: E501
|
# flake8: noqa
# TODO: deprecate
agent_instructions = """You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. \
You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Question: {question}"""
|
_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
# multi-scale training
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
|
from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class UpdateSessionSchema(BaseModel):
"""Input for UpdateSessionTool."""
sessionId: str = Field(
...,
description="""The sessionID,
received from a previous createSession run""",
)
query: str = Field(
...,
description="The query to run in multion agent.",
)
url: str = Field(
"https://www.google.com/",
description="""The Url to run the agent at. \
Note: accepts only secure links having https://""",
)
class MultionUpdateSession(BaseTool):
"""Tool that updates an existing Multion Browser Window with provided fields.
Attributes:
name: The name of the tool. Default: "update_multion_session"
description: The description of the tool.
args_schema: The schema for the tool's arguments. Default: UpdateSessionSchema
"""
name: str = "update_multion_session"
description: str = """Use this tool to update \
an existing corresponding Multion Browser Window with provided fields. \
Note: sessionId must be received from previous Browser window creation."""
args_schema: Type[UpdateSessionSchema] = UpdateSessionSchema
sessionId: str = ""
def _run(
self,
sessionId: str,
query: str,
url: Optional[str] = "https://www.google.com/",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> dict:
try:
try:
response = multion.update_session(
sessionId, {"input": query, "url": url}
)
content = {"sessionId": sessionId, "Response": response["message"]}
self.sessionId = sessionId
return content
except Exception as e:
print(f"{e}, retrying...") # noqa: T201
return {"error": f"{e}", "Response": "retrying..."}
except Exception as e:
raise Exception(f"An error occurred: {e}")
|
from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class UpdateSessionSchema(BaseModel):
"""Input for UpdateSessionTool."""
sessionId: str = Field(
...,
description="""The sessionID,
received from a previous createSession run""",
)
query: str = Field(
...,
description="The query to run in multion agent.",
)
url: str = Field(
"https://www.google.com/",
description="""The Url to run the agent at. \
Note: accepts only secure links having https://""",
)
class MultionUpdateSession(BaseTool): # type: ignore[override, override]
"""Tool that updates an existing Multion Browser Window with provided fields.
Attributes:
name: The name of the tool. Default: "update_multion_session"
description: The description of the tool.
args_schema: The schema for the tool's arguments. Default: UpdateSessionSchema
"""
name: str = "update_multion_session"
description: str = """Use this tool to update \
an existing corresponding Multion Browser Window with provided fields. \
Note: sessionId must be received from previous Browser window creation."""
args_schema: Type[UpdateSessionSchema] = UpdateSessionSchema
sessionId: str = ""
def _run(
self,
sessionId: str,
query: str,
url: Optional[str] = "https://www.google.com/",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> dict:
try:
try:
response = multion.update_session(
sessionId, {"input": query, "url": url}
)
content = {"sessionId": sessionId, "Response": response["message"]}
self.sessionId = sessionId
return content
except Exception as e:
print(f"{e}, retrying...") # noqa: T201
return {"error": f"{e}", "Response": "retrying..."}
except Exception as e:
raise Exception(f"An error occurred: {e}")
|
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
model = dict(
backbone=dict(
type='ResNet',
depth=34,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
|
_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
model = dict(
backbone=dict(
type='ResNet',
depth=34,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
|
import os
import socket
from typing import TYPE_CHECKING, Optional
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param client: docker client object
:return: network id if exists
"""
import docker
if TYPE_CHECKING: # pragma: no cover
from docker.models.containers import Container
container: 'Container' = None
try:
hostname = socket.gethostname()
container = client.containers.get(hostname)
except docker.errors.NotFound:
try:
# https://stackoverflow.com/a/52988227/15683245
with open('/proc/1/cpuset') as f:
hostname = os.path.basename(f.read().rstrip())
container = client.containers.get(hostname)
except Exception:
return None
try:
networks = container.attrs['NetworkSettings']['Networks']
if networks:
net_mode = list(networks.keys())[0]
return networks[net_mode]['NetworkID']
else:
return None
except Exception:
return None
def get_gpu_device_requests(gpu_args):
"""Get docker device requests from gpu args
:param gpu_args: gpu args, as a comma-separated string
:return: docker device requests
"""
import docker
_gpus = {
'count': 0,
'capabilities': ['gpu'],
'device': [],
'driver': '',
}
for gpu_arg in gpu_args.split(','):
if gpu_arg == 'all':
_gpus['count'] = -1
if gpu_arg.isdigit():
_gpus['count'] = int(gpu_arg)
if '=' in gpu_arg:
gpu_arg_key, gpu_arg_value = gpu_arg.split('=')
if gpu_arg_key in _gpus.keys():
if isinstance(_gpus[gpu_arg_key], list):
_gpus[gpu_arg_key].append(gpu_arg_value)
else:
_gpus[gpu_arg_key] = gpu_arg_value
device_requests = [
docker.types.DeviceRequest(
count=_gpus['count'],
driver=_gpus['driver'],
device_ids=_gpus['device'],
capabilities=[_gpus['capabilities']],
)
]
return device_requests
|
import os
import socket
from typing import Optional, TYPE_CHECKING
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param client: docker client object
:return: network id if exists
"""
import docker
if TYPE_CHECKING: # pragma: no cover
from docker.models.containers import Container
container: 'Container' = None
try:
hostname = socket.gethostname()
container = client.containers.get(hostname)
except docker.errors.NotFound:
try:
# https://stackoverflow.com/a/52988227/15683245
with open('/proc/1/cpuset') as f:
hostname = os.path.basename(f.read().rstrip())
container = client.containers.get(hostname)
except Exception:
return None
try:
networks = container.attrs['NetworkSettings']['Networks']
if networks:
net_mode = list(networks.keys())[0]
return networks[net_mode]['NetworkID']
else:
return None
except Exception:
return None
def get_gpu_device_requests(gpu_args):
"""Get docker device requests from gpu args
:param gpu_args: gpu args, as a comma-separated string
:return: docker device requests
"""
import docker
_gpus = {
'count': 0,
'capabilities': ['gpu'],
'device': [],
'driver': '',
}
for gpu_arg in gpu_args.split(','):
if gpu_arg == 'all':
_gpus['count'] = -1
if gpu_arg.isdigit():
_gpus['count'] = int(gpu_arg)
if '=' in gpu_arg:
gpu_arg_key, gpu_arg_value = gpu_arg.split('=')
if gpu_arg_key in _gpus.keys():
if isinstance(_gpus[gpu_arg_key], list):
_gpus[gpu_arg_key].append(gpu_arg_value)
else:
_gpus[gpu_arg_key] = gpu_arg_value
device_requests = [
docker.types.DeviceRequest(
count=_gpus['count'],
driver=_gpus['driver'],
device_ids=_gpus['device'],
capabilities=[_gpus['capabilities']],
)
]
return device_requests
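# A minimal usage sketch for the parser above; the argument strings illustrate
# the comma-separated format it accepts ('all', a count, or key=value pairs).
def _example_gpu_requests() -> None:
    all_gpus = get_gpu_device_requests('all')  # count=-1 requests every GPU
    two_nvidia = get_gpu_device_requests('2,driver=nvidia')  # count=2, driver set
    print(all_gpus, two_nvidia)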
|
"""Module contains a few fake embedding models for testing purposes."""
# Please do not add additional fake embedding model implementations here.
import hashlib
from pydantic import BaseModel
from typing_extensions import override
from langchain_core.embeddings import Embeddings
class FakeEmbeddings(Embeddings, BaseModel):
"""Fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import FakeEmbeddings
embed = FakeEmbeddings(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self) -> list[float]:
import numpy as np
return list(np.random.default_rng().normal(size=self.size))
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding() for _ in texts]
@override
def embed_query(self, text: str) -> list[float]:
return self._get_embedding()
class DeterministicFakeEmbedding(Embeddings, BaseModel):
"""Deterministic fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import DeterministicFakeEmbedding
embed = DeterministicFakeEmbedding(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self, seed: int) -> list[float]:
import numpy as np
# set the seed for the random generator
rng = np.random.default_rng(seed)
return list(rng.normal(size=self.size))
def _get_seed(self, text: str) -> int:
"""Get a seed for the random generator, using the hash of the text."""
return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
@override
def embed_query(self, text: str) -> list[float]:
return self._get_embedding(seed=self._get_seed(text))
|
"""Module contains a few fake embedding models for testing purposes."""
# Please do not add additional fake embedding model implementations here.
import hashlib
from pydantic import BaseModel
from typing_extensions import override
from langchain_core.embeddings import Embeddings
class FakeEmbeddings(Embeddings, BaseModel):
"""Fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import FakeEmbeddings
embed = FakeEmbeddings(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self) -> list[float]:
import numpy as np # type: ignore[import-not-found, import-untyped]
return list(np.random.default_rng().normal(size=self.size))
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding() for _ in texts]
@override
def embed_query(self, text: str) -> list[float]:
return self._get_embedding()
class DeterministicFakeEmbedding(Embeddings, BaseModel):
"""Deterministic fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import DeterministicFakeEmbedding
embed = DeterministicFakeEmbedding(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self, seed: int) -> list[float]:
import numpy as np # type: ignore[import-not-found, import-untyped]
# set the seed for the random generator
rng = np.random.default_rng(seed)
return list(rng.normal(size=self.size))
def _get_seed(self, text: str) -> int:
"""Get a seed for the random generator, using the hash of the text."""
return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
@override
def embed_query(self, text: str) -> list[float]:
return self._get_embedding(seed=self._get_seed(text))
|
import os
import pytest
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.core.prompts.base import ChatPromptTemplate
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
from pydantic import BaseModel
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini() -> None:
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
gemini_msg = chat_message_to_gemini(msg)
assert gemini_msg["role"] == MessageRole.USER
assert len(gemini_msg["parts"]) == 2
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt() -> None:
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg",
image_mimetype="image/jpeg",
)
)
response = Gemini(model="models/gemini-1.5-flash").chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream() -> None:
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini(model="models/gemini-1.5-flash").stream_chat(messages=[msg]))
assert response
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_with_tools() -> None:
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
msg = ChatMessage("What is the result of adding 2 and 3?")
model = Gemini(model="models/gemini-1.5-flash")
response = model.chat_with_tools(
user_msg=msg,
tools=[add_tool],
tool_config=ToolConfig(
function_calling_config=FunctionCallingConfig(
mode=FunctionCallingConfig.Mode.ANY
)
),
)
tool_calls = model.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "add"
assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}
assert len(response.additional_kwargs["tool_calls"]) >= 1
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_structured_llm() -> None:
class Test(BaseModel):
test: str
gemini_flash = Gemini(
model="models/gemini-2.0-flash-001",
api_key=os.environ["GOOGLE_API_KEY"],
additional_kwargs={"seed": 4242},
)
chat_prompt = ChatPromptTemplate(message_templates=[ChatMessage(content="test")])
direct_prediction_response = gemini_flash.structured_predict(
output_cls=Test, prompt=chat_prompt
)
assert direct_prediction_response.test is not None
structured_llm_response = gemini_flash.as_structured_llm(Test).complete("test")
assert structured_llm_response.raw.test is not None
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_is_function_calling_model() -> None:
assert Gemini(
model="models/gemini-2.0-flash-001"
).metadata.is_function_calling_model
# this model is the only one that does not support function calling
assert not Gemini(
model="models/gemini-2.0-flash-thinking-exp-01-21"
).metadata.is_function_calling_model
# in case of un-released models it should be possible to override the
# capabilities of the current model
manual_override = Gemini(model="models/gemini-2.0-flash-001")
assert manual_override.metadata.is_function_calling_model
manual_override._is_function_call_model = False
assert not manual_override._is_function_call_model
assert not manual_override.metadata.is_function_calling_model
|
import os
import pytest
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.core.prompts.base import ChatPromptTemplate
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
from pydantic import BaseModel
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini() -> None:
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}, {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt() -> None:
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini(model="models/gemini-1.5-flash").chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream() -> None:
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini(model="models/gemini-1.5-flash").stream_chat(messages=[msg]))
assert response
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_with_tools() -> None:
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
msg = ChatMessage("What is the result of adding 2 and 3?")
model = Gemini(model="models/gemini-1.5-flash")
response = model.chat_with_tools(
user_msg=msg,
tools=[add_tool],
tool_config=ToolConfig(
function_calling_config=FunctionCallingConfig(
mode=FunctionCallingConfig.Mode.ANY
)
),
)
tool_calls = model.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "add"
assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}
assert len(response.additional_kwargs["tool_calls"]) >= 1
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_structured_llm() -> None:
class Test(BaseModel):
test: str
gemini_flash = Gemini(
model="models/gemini-2.0-flash-001",
api_key=os.environ["GOOGLE_API_KEY"],
additional_kwargs={"seed": 4242},
)
chat_prompt = ChatPromptTemplate(message_templates=[ChatMessage(content="test")])
direct_prediction_response = gemini_flash.structured_predict(
output_cls=Test, prompt=chat_prompt
)
assert direct_prediction_response.test is not None
structured_llm_response = gemini_flash.as_structured_llm(Test).complete("test")
assert structured_llm_response.raw.test is not None
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_is_function_calling_model() -> None:
assert Gemini(
model="models/gemini-2.0-flash-001"
).metadata.is_function_calling_model
# this model is the only one that does not support function calling
assert not Gemini(
model="models/gemini-2.0-flash-thinking-exp-01-21"
).metadata.is_function_calling_model
# in case of un-released models it should be possible to override the
# capabilities of the current model
manual_override = Gemini(model="models/gemini-2.0-flash-001")
assert manual_override.metadata.is_function_calling_model
manual_override._is_function_call_model = False
assert not manual_override._is_function_call_model
assert not manual_override.metadata.is_function_calling_model
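# A minimal offline sketch of the message conversion covered by
# test_chat_message_to_gemini above; it needs no GOOGLE_API_KEY.
def _example_message_conversion() -> None:
    msg = ChatMessage("Describe this image:")
    msg.blocks.append(ImageBlock(image=b"fake-bytes", image_mimetype="image/png"))
    print(chat_message_to_gemini(msg))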
|
import torch
from docarray import Document
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(Document):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(Document):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
|
import torch
from docarray import Document
from docarray.typing import TorchTensor
def test_set_torch_tensor():
class MyDocument(Document):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
|
from typing import Any, Optional, Type, TypeVar, Union
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.utils._internal.pydantic import is_pydantic_v2
if is_pydantic_v2:
from pydantic import model_validator
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDoc):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain:
- an [`Mesh3DUrl`][docarray.typing.url.Mesh3DUrl] (`Mesh3D.url`)
- a [`VerticesAndFaces`][docarray.documents.mesh.vertices_and_faces.VerticesAndFaces]
object containing:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of
vertices (`Mesh3D.tensors.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of faces (`Mesh3D.tensors.faces`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`Mesh3D.embedding`)
- a `bytes` object (`Mesh3D.bytes_`).
You can use this Document directly:
```python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.tensors.vertices)
```
You can extend this Document:
```python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[str]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.name = 'my first mesh'
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.tensors.vertices)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import Mesh3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
mesh: Mesh3D
text: TextDoc
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes_ = mmdoc.mesh.url.load_bytes()
```
You can display your 3D mesh in a notebook from either its url, or its tensors:
```python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
# mesh.tensors.display()
```
"""
url: Optional[Mesh3DUrl] = Field(
description='URL to a file containing 3D mesh information. Can be remote (web) URL, or a local file path.',
example='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj',
default=None,
)
tensors: Optional[VerticesAndFaces] = Field(
description='A tensor object of 3D mesh of type `VerticesAndFaces`.',
example=[[0, 1, 1], [1, 0, 1], [1, 1, 0]],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the 3D mesh.',
default=[1, 0, 1],
)
bytes_: Optional[bytes] = Field(
description='Bytes representation of 3D mesh.',
default=None,
)
if is_pydantic_v2:
@model_validator(mode='before')
@classmethod
def validate_model_before(cls, value):
if isinstance(value, str):
return {'url': value}
return value
else:
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDoc):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain:
- an [`Mesh3DUrl`][docarray.typing.url.Mesh3DUrl] (`Mesh3D.url`)
- a [`VerticesAndFaces`][docarray.documents.mesh.vertices_and_faces.VerticesAndFaces]
object containing:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of
vertices (`Mesh3D.tensors.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of faces (`Mesh3D.tensors.faces`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`Mesh3D.embedding`)
- a `bytes` object (`Mesh3D.bytes_`).
You can use this Document directly:
```python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.tensors.vertices)
```
You can extend this Document:
```python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[str]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.name = 'my first mesh'
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.tensors.vertices)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import Mesh3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
mesh: Mesh3D
text: TextDoc
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes_ = mmdoc.mesh.url.load_bytes()
```
You can display your 3D mesh in a notebook from either its url, or its tensors:
```python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
# mesh.tensors.display()
```
"""
url: Optional[Mesh3DUrl] = Field(
description='URL to a file containing 3D mesh information. Can be remote (web) URL, or a local file path.',
example='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj',
default=None,
)
tensors: Optional[VerticesAndFaces] = Field(
description='A tensor object of 3D mesh of type `VerticesAndFaces`.',
example=[[0, 1, 1], [1, 0, 1], [1, 1, 0]],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the 3D mesh.',
default=[1, 0, 1],
)
bytes_: Optional[bytes] = Field(
description='Bytes representation of 3D mesh.',
default=None,
)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageVisionLLMReader(BaseReader):
"""
Image parser.
Caption image using Blip2 (a multimodal VisionLLM similar to GPT4).
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
prompt: str = "Question: describe what you see in this image. Answer:",
):
"""Init params."""
if parser_config is None:
try:
import sentencepiece # noqa
import torch
from PIL import Image # noqa
from transformers import Blip2ForConditionalGeneration, Blip2Processor
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers sentencepiece Pillow`"
)
self._torch = torch
self._torch_imported = True
device = infer_torch_device()
dtype = (
self._torch.float16
if self._torch.cuda.is_available()
else self._torch.float32
)
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
"Salesforce/blip2-opt-2.7b", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
# Try to import PyTorch in order to run inference efficiently.
self._import_torch()
self._parser_config = parser_config
self._keep_image = keep_image
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
if self._torch_imported:
# Gradients are not needed during inference. If PyTorch is
# installed, we can instruct it to not track the gradients.
# This reduces GPU memory usage and improves inference efficiency.
with self._torch.no_grad():
out = model.generate(**inputs)
else:
# Fallback to less efficient behavior if PyTorch is not installed.
out = model.generate(**inputs)
text_str = processor.decode(out[0], skip_special_tokens=True)
return [
ImageDocument(
text=text_str,
image=image_str,
image_path=str(file),
metadata=extra_info or {},
)
]
def _import_torch(self) -> None:
self._torch = None
try:
import torch
self._torch = torch
self._torch_imported = True
except ImportError:
self._torch_imported = False
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageVisionLLMReader(BaseReader):
"""Image parser.
Caption image using Blip2 (a multimodal VisionLLM similar to GPT4).
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
prompt: str = "Question: describe what you see in this image. Answer:",
):
"""Init params."""
if parser_config is None:
try:
import sentencepiece # noqa
import torch
from PIL import Image # noqa
from transformers import Blip2ForConditionalGeneration, Blip2Processor
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers sentencepiece Pillow`"
)
self._torch = torch
self._torch_imported = True
device = infer_torch_device()
dtype = (
self._torch.float16
if self._torch.cuda.is_available()
else self._torch.float32
)
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
"Salesforce/blip2-opt-2.7b", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
# Try to import PyTorch in order to run inference efficiently.
self._import_torch()
self._parser_config = parser_config
self._keep_image = keep_image
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
        # prompted (conditional) image captioning, using self._prompt as the text prompt
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
if self._torch_imported:
# Gradients are not needed during inference. If PyTorch is
# installed, we can instruct it to not track the gradients.
# This reduces GPU memory usage and improves inference efficiency.
with self._torch.no_grad():
out = model.generate(**inputs)
else:
# Fallback to less efficient behavior if PyTorch is not installed.
out = model.generate(**inputs)
text_str = processor.decode(out[0], skip_special_tokens=True)
return [
ImageDocument(
text=text_str,
image=image_str,
image_path=str(file),
metadata=extra_info or {},
)
]
def _import_torch(self) -> None:
self._torch = None
try:
import torch
self._torch = torch
self._torch_imported = True
except ImportError:
self._torch_imported = False
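A minimal usage sketch for the reader above, for illustration only: it assumes the extra dependencies listed in the ImportError message are installed, and "photo.jpg" is a placeholder path for a local image file.
from pathlib import Path

reader = ImageVisionLLMReader(keep_image=True)
docs = reader.load_data(Path("photo.jpg"))  # "photo.jpg" is a hypothetical local image
print(docs[0].text)               # the BLIP-2 generated caption
print(docs[0].image is not None)  # True, since keep_image=True stores the base64 image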
|
import json
import logging
from enum import Enum
from typing import Any
from requests.exceptions import HTTPError, RequestException
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
logger = logging.getLogger(name=__name__)
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
default=None,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="Errors on 4xx status codes")
server_error: object = SchemaField(description="Errors on 5xx status codes")
error: str = SchemaField(description="Errors for all other exceptions")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
body = input_data.body
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
try:
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=body if input_data.json_format else None,
data=body if not input_data.json_format else None,
)
result = response.json() if input_data.json_format else response.text
yield "response", result
except HTTPError as e:
# Handle error responses
try:
result = e.response.json() if input_data.json_format else str(e)
except json.JSONDecodeError:
result = str(e)
if 400 <= e.response.status_code < 500:
yield "client_error", result
elif 500 <= e.response.status_code < 600:
yield "server_error", result
else:
error_msg = (
"Unexpected status code "
f"{e.response.status_code} '{e.response.reason}'"
)
logger.warning(error_msg)
yield "error", error_msg
except RequestException as e:
# Handle other request-related exceptions
yield "error", str(e)
except Exception as e:
# Catch any other unexpected exceptions
yield "error", str(e)
|
import json
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
default=None,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="The error on 4xx status codes")
server_error: object = SchemaField(description="The error on 5xx status codes")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
body = input_data.body
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=body if input_data.json_format else None,
data=body if not input_data.json_format else None,
)
result = response.json() if input_data.json_format else response.text
if response.status_code // 100 == 2:
yield "response", result
elif response.status_code // 100 == 4:
yield "client_error", result
elif response.status_code // 100 == 5:
yield "server_error", result
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
|
"""
Example showing how to use the SpladeLambdaSchedulerCallback to gradually
increase the lambda parameters during training of a SPLADE model.
"""
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SchedulerType,
SparseEncoder,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
SparseMarginMSELoss,
SpladeLambdaSchedulerCallback,
SpladeLoss,
SpladePooling,
)
# Initialize the student SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the teacher SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
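    # MarginMSE-style labels: for each query, the teacher's similarity to passage1
    # minus its similarity to passage2 (the margin the student learns to reproduce).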
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SpladeLoss(
student_model,
main_loss=SparseMarginMSELoss(student_model),
lambda_corpus=5e-3,
lambda_query=0.1,
)
# Create the callback with explicit parameters
splade_callback = SpladeLambdaSchedulerCallback(
loss=loss,
scheduler_type=SchedulerType.QUADRATIC, # Can be LINEAR or QUADRATIC
    warmup_ratio=1 / 3,  # Lambdas reach their max values after one third of the total steps
)
training_args = SparseEncoderTrainingArguments(
num_train_epochs=20,
per_device_train_batch_size=2,
output_dir="runs/splade_with_lambda_scheduling",
logging_steps=1,
)
# Create the trainer with the callback
trainer = SparseEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
callbacks=[splade_callback], # Explicitly add the callback
args=training_args,
)
# Train the model with the scheduler active
trainer.train()
# Note:
# 1. The lambda values of SpladeLoss will start at 0 and gradually increase to their maximum values
# 2. When using the QUADRATIC scheduler, the values increase more slowly at first
# 3. If you don't add the callback manually, the SparseEncoderTrainer will add it automatically
# when it detects a SpladeLoss is being used, but with a linear scheduler and 1/3 warmup ratio
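An illustrative sketch of the warmup behaviour described in the notes above; this is not the library's implementation, and the function name and exact scaling are assumptions.
def scheduled_lambda(step: int, total_steps: int, max_value: float,
                     warmup_ratio: float = 1 / 3, quadratic: bool = True) -> float:
    # Scale from 0 up to max_value over the warmup window, then hold at max_value.
    warmup_steps = max(1, int(total_steps * warmup_ratio))
    progress = min(1.0, step / warmup_steps)
    if quadratic:
        progress = progress ** 2  # the quadratic schedule rises more slowly at first
    return max_value * progress

# e.g. with 300 total steps and the defaults above, lambda_query (max 0.1)
# would reach 0.1 at step 100 and stay there for the rest of training.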
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladeLoss,
SpladePooling,
)
# Initialize the student SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the teacher SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SpladeLoss(
student_model,
    main_loss=SparseMarginMSELoss(student_model),
lambda_corpus=5e-3,
lambda_query=0.1,
)
trainer = SparseEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
|