input (stringlengths 33-5k) | output (stringlengths 32-5k) |
---|---|
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None, a string, or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
self._persist = bool(config.table_name)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
|
import sqlite3
import warnings
from dataclasses import dataclass, field, asdict
from tempfile import NamedTemporaryFile
from typing import (
Iterable,
Dict,
Optional,
TYPE_CHECKING,
Union,
List,
Tuple,
)
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None, a string, or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
self._persist = bool(config.table_name)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
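# A hedged usage sketch (not part of the original module): it assumes docarray v1's
# `DocumentArray(storage=..., config=...)` entry point; the database file and table
# name below are illustrative placeholders.
if __name__ == '__main__':
    from docarray import Document, DocumentArray
    da = DocumentArray(
        storage='sqlite',
        config={'connection': 'example.db', 'table_name': 'demo_table'},
    )
    da.append(Document(text='hello'))
    print(len(da))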
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
from .cocoeval_mp import COCOevalMP
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic', 'COCOevalMP']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']
|
import tracemalloc
from functools import wraps
from docarray import DocArray
from docarray.documents import TextDoc
def get_test_da(n: int):
return DocArray[TextDoc](gen_text_docs(n))
def gen_text_docs(n: int):
for i in range(n):
yield TextDoc(text=f'text {i}')
def profile_memory(func):
"""Decorator to profile memory usage of a function.
Returns:
original function return value, (current memory usage, peak memory usage)
"""
@wraps(func)
def _inner(*args, **kwargs):
tracemalloc.start()
ret = func(*args, **kwargs)
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
return ret, (current, peak)
return _inner
|
import tracemalloc
from functools import wraps
from docarray import DocumentArray
from docarray.documents import TextDoc
def get_test_da(n: int):
return DocumentArray[TextDoc](gen_text_docs(n))
def gen_text_docs(n: int):
for i in range(n):
yield TextDoc(text=f'text {i}')
def profile_memory(func):
"""Decorator to profile memory usage of a function.
Returns:
original function return value, (current memory usage, peak memory usage)
"""
@wraps(func)
def _inner(*args, **kwargs):
tracemalloc.start()
ret = func(*args, **kwargs)
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
return ret, (current, peak)
return _inner
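# A minimal usage sketch (not part of the original file): applies the decorator above
# to a small builder function; the document count is an arbitrary illustrative value.
if __name__ == '__main__':
    @profile_memory
    def build_da():
        return get_test_da(1_000)
    da, (current_bytes, peak_bytes) = build_da()
    print(f'current={current_bytes} bytes, peak={peak_bytes} bytes')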
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deterministic and
# gives a very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_chroma_spectrogram(self):
sample_rate = 8000
transform = T.ChromaSpectrogram(sample_rate=sample_rate, n_fft=400)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_chroma_scale(self):
sample_rate = 8000
n_fft = 400
n_chroma = 12
transform = T.ChromaScale(sample_rate=sample_rate, n_freqs=n_fft // 2 + 1, n_chroma=n_chroma)
waveform = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deterministic and
# gives a very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
|
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'executor-merger': '0.1.1',
'custom-gateway': '0.1.1',
'multiprotocol-gateway': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
class DockerComposeServices:
healthy_status = 'healthy'
unhealthy_status = 'unhealthy'
def __init__(self, dump_path, timeout_second=30):
self.dump_path = dump_path
self.timeout_second = timeout_second
def __enter__(self):
subprocess.run(
f'docker-compose -f {self.dump_path} up --build -d --remove-orphans'.split(
' '
)
)
container_ids = (
subprocess.run(
f'docker-compose -f {self.dump_path} ps -q'.split(' '),
capture_output=True,
)
.stdout.decode("utf-8")
.split('\n')
)
container_ids.remove('') # remove empty return line
if not container_ids:
raise RuntimeError('docker-compose ps did not detect any launched containers')
client = docker.from_env()
init_time = time.time()
healthy = False
while time.time() - init_time < self.timeout_second:
if self._are_all_container_healthy(container_ids, client):
healthy = True
break
time.sleep(0.1)
if not healthy:
raise RuntimeError('Docker containers are not healthy')
@staticmethod
def _are_all_container_healthy(
container_ids: List[str], client: docker.client.DockerClient
) -> bool:
for id_ in container_ids:
status = client.containers.get(id_).attrs['State']['Health']['Status']
if status != DockerComposeServices.healthy_status:
return False
return True
def __exit__(self, exc_type, exc_val, exc_tb):
subprocess.run(
f'docker-compose -f {self.dump_path} down --remove-orphans'.split(' ')
)
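# A hedged usage sketch (not part of the original module): the compose file path is an
# illustrative placeholder; the context manager blocks until every container reports a
# healthy status or the timeout elapses.
if __name__ == '__main__':
    with DockerComposeServices('docker-compose.yml', timeout_second=60):
        # services are up and healthy here; run requests/tests against them
        pass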
|
import os
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'executor-merger': '0.1.1',
'custom-gateway': '0.1.1',
'multiprotocol-gateway': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageBytes,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
bytes_: bytes
img_bytes: ImageBytes
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
bytes_=b'hello',
img_bytes=b'img',
)
doc = doc.to_protobuf()
doc = MyDoc.from_protobuf(doc)
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
assert (doc.embedding == np.zeros((3, 224, 224))).all()
assert doc.bytes_ == b'hello'
assert doc.img_bytes == b'img'
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
class MySUperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = doc.to_protobuf()
doc = MyDoc.from_protobuf(doc)
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Dict
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.base_doc.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.stacked.column_storage import ColumnStorageView
_console: Console = Console()
T = TypeVar('T', bound='BaseDoc')
class BaseDoc(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: Optional[ID] = Field(default_factory=lambda: ID(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
# `DocArrayResponse` is able to handle tensors by itself.
# Therefore, we stop FastAPI from doing any transformations
# on tensors by setting an identity function as a custom encoder.
json_encoders = {AbstractTensor: lambda x: x}
validate_assignment = True
@classmethod
def from_view(cls: Type[T], storage_view: 'ColumnStorageView') -> T:
doc = cls.__new__(cls)
object.__setattr__(doc, '__dict__', storage_view)
object.__setattr__(doc, '__fields_set__', set(storage_view.keys()))
doc._init_private_attributes()
return doc
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
Access the nested Python class defined in the schema. Could be useful for
reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self) -> str:
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self) -> None:
"""Displays the object in IPython as a summary"""
self.summary()
def is_view(self) -> bool:
from docarray.array.stacked.column_storage import ColumnStorageView
return isinstance(self.__dict__, ColumnStorageView)
def __getattr__(self, item) -> Any:
if item in self.__fields__.keys():
return self.__dict__[item]
else:
return super().__getattribute__(item)
def __setattr__(self, field, value) -> None:
if not self.is_view():
super().__setattr__(field, value)
else:
# here we first validate with pydantic
# Then we apply the value to the remote dict,
# and we change back the __dict__ value to the remote dict
dict_ref = self.__dict__
super().__setattr__(field, value)
for key, val in self.__dict__.items():
dict_ref[key] = val
object.__setattr__(self, '__dict__', dict_ref)
def __eq__(self, other) -> bool:
if self.dict().keys() != other.dict().keys():
return False
for field_name in self.__fields__:
value1 = getattr(self, field_name)
value2 = getattr(other, field_name)
if field_name == 'id':
continue
if isinstance(value1, AbstractTensor) and isinstance(
value2, AbstractTensor
):
comp_be1 = value1.get_comp_backend()
comp_be2 = value2.get_comp_backend()
if comp_be1.shape(value1) != comp_be2.shape(value2):
return False
if (
not (comp_be1.to_numpy(value1) == comp_be2.to_numpy(value2))
.all()
.item()
):
return False
else:
if value1 != value2:
return False
return True
def __ne__(self, other) -> bool:
return not (self == other)
def _docarray_to_json_compatible(self) -> Dict:
"""
Convert itself into a json compatible object
:return: A dictionary of the BaseDoc object
"""
return self.dict()
|
import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Dict
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.base_doc.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.stacked.column_storage import ColumnStorageView
_console: Console = Console()
T = TypeVar('T', bound='BaseDoc')
class BaseDoc(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: Optional[ID] = Field(default_factory=lambda: ID(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
# `DocArrayResponse` is able to handle tensors by itself.
# Therefore, we stop FastAPI from doing any transformations
# on tensors by setting an identity function as a custom encoder.
json_encoders = {AbstractTensor: lambda x: x}
validate_assignment = True
@classmethod
def from_view(cls: Type[T], storage_view: 'ColumnStorageView') -> T:
doc = cls.__new__(cls)
object.__setattr__(doc, '__dict__', storage_view)
object.__setattr__(doc, '__fields_set__', set(storage_view.keys()))
doc._init_private_attributes()
return doc
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
Access the nested Python class defined in the schema. Could be useful for
reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self) -> str:
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self) -> None:
"""Displays the object in IPython as a summary"""
self.summary()
def is_view(self) -> bool:
from docarray.array.stacked.column_storage import ColumnStorageView
return isinstance(self.__dict__, ColumnStorageView)
def __getattr__(self, item) -> Any:
if item in self.__fields__.keys():
return self.__dict__[item]
else:
return super().__getattribute__(item)
def __setattr__(self, field, value) -> None:
if not self.is_view():
super().__setattr__(field, value)
else:
# here we first validate with pydantic
# Then we apply the value to the remote dict,
# and we change back the __dict__ value to the remote dict
dict_ref = self.__dict__
super().__setattr__(field, value)
for key, val in self.__dict__.items():
dict_ref[key] = val
object.__setattr__(self, '__dict__', dict_ref)
def _docarray_to_json_compatible(self) -> Dict:
"""
Convert itself into a json compatible object
:return: A dictionary of the BaseDoc object
"""
return self.dict()
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
model_class = Lumina2Transformer2DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
alternate_keys_ckpt_paths = [
"https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
]
repo_id = "Alpha-VLLM/Lumina-Image-2.0"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
model_class = Lumina2Transformer2DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
alternate_keys_ckpt_paths = [
"https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
]
repo_id = "Alpha-VLLM/Lumina-Image-2.0"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
torch.cuda.empty_cache()
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
torch.cuda.empty_cache()
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
block_dilations=[2, 4, 6, 8]),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00066667,
by_epoch=False,
begin=0,
end=1500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='RandomShift', prob=0.5, max_shift_px=32),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=8, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
block_dilations=[2, 4, 6, 8]),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00066667,
by_epoch=False,
begin=0,
end=1500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='RandomShift', prob=0.5, max_shift_px=32),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=8, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.6.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.5.3.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"""
Flat reader.
Extract raw text from a file and save the file type in the metadata
"""
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def _get_fs(
self, file: Path, fs: Optional[AbstractFileSystem] = None
) -> AbstractFileSystem:
if fs is None:
fs = LocalFileSystem()
return fs
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
fs = self._get_fs(file, fs)
with fs.open(file, encoding="utf-8") as f:
content = f.read()
metadata = {"filename": file.name, "extension": file.suffix}
if extra_info:
metadata = {**metadata, **extra_info}
return [Document(text=content, metadata=metadata)]
|
"""Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"""
Flat reader.
Extract raw text from a file and save the file type in the metadata
"""
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def _get_fs(
self, file: Path, fs: Optional[AbstractFileSystem] = None
) -> AbstractFileSystem:
if fs is None:
fs = LocalFileSystem()
return fs
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
fs = self._get_fs(file, fs)
with fs.open(file, encoding="utf-8") as f:
content = f.read()
metadata = {"filename": file.name, "extension": file.suffix}
if extra_info:
metadata = {**metadata, **extra_info}
return [Document(text=content, metadata=metadata)]
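# A minimal usage sketch (not part of the original module): the file name and the
# extra_info payload are illustrative placeholders.
if __name__ == "__main__":
    docs = FlatReader().load_data(Path("notes.txt"), extra_info={"source": "local"})
    print(docs[0].metadata)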
|
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
)
from llama_index.llms.modelscope.base import ModelScopeLLM
@pytest.fixture()
def modelscope_llm():
return ModelScopeLLM()
@pytest.fixture()
def prompt():
return "Hi, my name is"
@pytest.fixture()
def messages():
return [
ChatMessage(content="Which movie is the best?"),
ChatMessage(content="It's Die Hard for sure.", role=MessageRole.ASSISTANT),
ChatMessage(content="Can you explain why?"),
]
@pytest.mark.complete
def test_modelscope_complete(modelscope_llm, prompt):
response = modelscope_llm.complete(prompt)
assert response is not None
assert str(response).strip() != ""
print(response)
@pytest.mark.complete
def test_modelscope_stream_complete(modelscope_llm, prompt):
response = modelscope_llm.stream_complete(prompt)
assert response is not None
for r in response:
assert r is not None
assert str(r).strip() != ""
print(r)
@pytest.mark.xfail(reason="20 is the default max_length of the generation config")
def test_modelscope_chat_clear(modelscope_llm, messages):
response = modelscope_llm.chat(messages)
assert response is not None
assert str(response).strip() != ""
print(response)
@pytest.mark.chat
def test_modelscope_chat(modelscope_llm, messages):
response = modelscope_llm.chat(messages, max_new_tokens=100)
assert response is not None
assert str(response).strip() != ""
print(response)
@pytest.mark.chat
def test_modelscope_stream_chat(modelscope_llm, messages):
gen = modelscope_llm.stream_chat(messages, max_new_tokens=100)
assert gen is not None
for r in gen:
assert r is not None
assert str(r).strip() != ""
print(r)
|
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
)
from llama_index.llms.modelscope.base import ModelScopeLLM
@pytest.fixture()
def modelscope_llm():
return ModelScopeLLM()
@pytest.fixture()
def prompt():
return "Hi, my name is"
@pytest.fixture()
def messages():
return [
ChatMessage(content="Which movie is the best?"),
ChatMessage(content="It's Die Hard for sure.", role=MessageRole.ASSISTANT),
ChatMessage(content="Can you explain why?"),
]
@pytest.mark.complete()
def test_modelscope_complete(modelscope_llm, prompt):
response = modelscope_llm.complete(prompt)
assert response is not None
assert str(response).strip() != ""
print(response)
@pytest.mark.complete()
def test_modelscope_stream_complete(modelscope_llm, prompt):
response = modelscope_llm.stream_complete(prompt)
assert response is not None
for r in response:
assert r is not None
assert str(r).strip() != ""
print(r)
@pytest.mark.xfail(reason="20 is the default max_length of the generation config")
def test_modelscope_chat_clear(modelscope_llm, messages):
response = modelscope_llm.chat(messages)
assert response is not None
assert str(response).strip() != ""
print(response)
@pytest.mark.chat()
def test_modelscope_chat(modelscope_llm, messages):
response = modelscope_llm.chat(messages, max_new_tokens=100)
assert response is not None
assert str(response).strip() != ""
print(response)
@pytest.mark.chat()
def test_modelscope_stream_chat(modelscope_llm, messages):
gen = modelscope_llm.stream_chat(messages, max_new_tokens=100)
assert gen is not None
for r in gen:
assert r is not None
assert str(r).strip() != ""
print(r)
|
"""Test Cohere API wrapper."""
from pathlib import Path
from pydantic import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.cohere import Cohere
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_cohere_call() -> None:
"""Test valid call to cohere."""
llm = Cohere(max_tokens=10)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_cohere_api_key(monkeypatch: MonkeyPatch) -> None:
"""Test that cohere api key is a secret key."""
# test initialization from init
assert isinstance(Cohere(cohere_api_key="1").cohere_api_key, SecretStr) # type: ignore[arg-type]
# test initialization from env variable
monkeypatch.setenv("COHERE_API_KEY", "secret-api-key")
assert isinstance(Cohere().cohere_api_key, SecretStr)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an Cohere LLM."""
llm = Cohere(max_tokens=10)
llm.save(file_path=tmp_path / "cohere.yaml")
loaded_llm = load_llm(tmp_path / "cohere.yaml")
assert_llm_equality(llm, loaded_llm)
|
"""Test Cohere API wrapper."""
from pathlib import Path
from pydantic import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.cohere import Cohere
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_cohere_call() -> None:
"""Test valid call to cohere."""
llm = Cohere(max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_cohere_api_key(monkeypatch: MonkeyPatch) -> None:
"""Test that cohere api key is a secret key."""
# test initialization from init
assert isinstance(Cohere(cohere_api_key="1").cohere_api_key, SecretStr) # type: ignore[arg-type, call-arg]
# test initialization from env variable
monkeypatch.setenv("COHERE_API_KEY", "secret-api-key")
assert isinstance(Cohere().cohere_api_key, SecretStr) # type: ignore[call-arg]
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an Cohere LLM."""
llm = Cohere(max_tokens=10) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "cohere.yaml")
loaded_llm = load_llm(tmp_path / "cohere.yaml")
assert_llm_equality(llm, loaded_llm)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8 # batch_size must be divisible by pos_neg_ratio
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info(f"Train sentences: {len(train_sentences)}")
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True if your GPU supports FP16 cores
)
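# Hedged usage sketch (added for illustration; not part of the original script):
# after `model.fit` finishes, the in-memory `model` can be used directly to
# embed new sentences with the freshly trained weights.
example_sentences = ["A sentence to embed after contrastive-tension training."]
example_embeddings = model.encode(example_sentences, show_progress_bar=False)
print("Example embedding shape:", example_embeddings.shape)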
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8 # batch_size must be divisible by pos_neg_ratio
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("Train sentences: {}".format(len(train_sentences)))
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True if your GPU supports FP16 cores
)
|
from typing import Dict
from hypothesis import given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(20)
parameter_strategy = strategies.fixed_dictionaries({
'booster': strategies.just('gblinear'),
'eta': strategies.floats(0.01, 0.25),
'tolerance': strategies.floats(1e-5, 1e-2),
'nthread': strategies.integers(1, 4),
})
coord_strategy = strategies.fixed_dictionaries({
'feature_selector': strategies.sampled_from(['cyclic', 'shuffle',
'greedy', 'thrifty']),
'top_k': strategies.integers(1, 10),
})
def train_result(param: dict, dmat: xgb.DMatrix, num_rounds: int) -> Dict[str, Dict]:
result: Dict[str, Dict] = {}
xgb.train(
param,
dmat,
num_rounds,
evals=[(dmat, "train")],
verbose_eval=False,
evals_result=result,
)
return result
class TestLinear:
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate(self, param, num_rounds, dataset, coord_param):
param['updater'] = 'coord_descent'
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result, 5e-4)
# Loss is not guaranteed to always decrease because of regularisation parameters
# We test a weaker condition that the loss has not increased between the first and last
# iteration
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy,
strategies.floats(1e-5, 0.8),
strategies.floats(1e-5, 0.8)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, alpha, lambd):
param['updater'] = 'coord_descent'
param['alpha'] = alpha
param['lambda'] = lambd
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
@given(
parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy()
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun(self, param, num_rounds, dataset):
param['updater'] = 'shotgun'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
# shotgun is non-deterministic, so we relax the test by only using first and last
# iteration.
if len(result) > 2:
sampled_result = (result[0], result[-1])
else:
sampled_result = result
assert tm.non_increasing(sampled_result)
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
strategies.floats(1e-5, 1.0),
strategies.floats(1e-5, 1.0)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
param['updater'] = 'shotgun'
param['alpha'] = alpha
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
|
from hypothesis import given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(20)
parameter_strategy = strategies.fixed_dictionaries({
'booster': strategies.just('gblinear'),
'eta': strategies.floats(0.01, 0.25),
'tolerance': strategies.floats(1e-5, 1e-2),
'nthread': strategies.integers(1, 4),
})
coord_strategy = strategies.fixed_dictionaries({
'feature_selector': strategies.sampled_from(['cyclic', 'shuffle',
'greedy', 'thrifty']),
'top_k': strategies.integers(1, 10),
})
def train_result(param, dmat, num_rounds):
result = {}
xgb.train(
param,
dmat,
num_rounds,
evals=[(dmat, "train")],
verbose_eval=False,
evals_result=result,
)
return result
class TestLinear:
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate(self, param, num_rounds, dataset, coord_param):
param['updater'] = 'coord_descent'
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result, 5e-4)
# Loss is not guaranteed to always decrease because of regularisation parameters
# We test a weaker condition that the loss has not increased between the first and last
# iteration
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy,
strategies.floats(1e-5, 0.8),
strategies.floats(1e-5, 0.8)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, alpha, lambd):
param['updater'] = 'coord_descent'
param['alpha'] = alpha
param['lambda'] = lambd
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
@given(
parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy()
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun(self, param, num_rounds, dataset):
param['updater'] = 'shotgun'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
# shotgun is non-deterministic, so we relax the test by only using first and last
# iteration.
if len(result) > 2:
sampled_result = (result[0], result[-1])
else:
sampled_result = result
assert tm.non_increasing(sampled_result)
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
strategies.floats(1e-5, 1.0),
strategies.floats(1e-5, 1.0)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
param['updater'] = 'shotgun'
param['alpha'] = alpha
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from importlib import import_module
class DependencyTester(unittest.TestCase):
def test_diffusers_import(self):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def test_backend_registration(self):
import diffusers
from diffusers.dependency_versions_table import deps
all_classes = inspect.getmembers(diffusers, inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
backend = "k-diffusion"
elif backend == "invisible_watermark":
backend = "invisible-watermark"
elif backend == "opencv":
backend = "opencv-python"
assert backend in deps, f"{backend} is not in the deps table!"
def test_pipeline_imports(self):
import diffusers
import diffusers.pipelines
all_classes = inspect.getmembers(diffusers, inspect.isclass)
for cls_name, cls_module in all_classes:
if hasattr(diffusers.pipelines, cls_name):
pipeline_folder_module = ".".join(str(cls_module.__module__).split(".")[:3])
_ = import_module(pipeline_folder_module, str(cls_name))
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from importlib import import_module
class DependencyTester(unittest.TestCase):
def test_diffusers_import(self):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def test_backend_registration(self):
import diffusers
from diffusers.dependency_versions_table import deps
all_classes = inspect.getmembers(diffusers, inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
backend = "k-diffusion"
elif backend == "invisible_watermark":
backend = "invisible-watermark"
elif backend == "opencv":
backend = "opencv-python"
assert backend in deps, f"{backend} is not in the deps table!"
def test_pipeline_imports(self):
import diffusers
import diffusers.pipelines
all_classes = inspect.getmembers(diffusers, inspect.isclass)
for cls_name, cls_module in all_classes:
if hasattr(diffusers.pipelines, cls_name):
pipeline_folder_module = ".".join(str(cls_module.__module__).split(".")[:3])
_ = import_module(pipeline_folder_module, str(cls_name))
|
import asyncio
import pytest
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
)
from vespa.application import ApplicationPackage
from llama_index.vector_stores.vespa import VespaVectorStore, hybrid_template
try:
# Should be installed as a pyvespa dependency
import docker
client = docker.from_env()
docker_available = client.ping()
except Exception:
docker_available = False
# Assuming Vespa services are mocked or local Vespa Docker is used
@pytest.fixture(scope="session")
def vespa_app():
app_package: ApplicationPackage = hybrid_template
try:
return VespaVectorStore(
application_package=app_package, deployment_target="local"
)
except RuntimeError as e:
pytest.skip(f"Could not create VespaVectorStore: {e}")
@pytest.fixture(scope="session")
def nodes() -> list:
return [
TextNode(
text="The Shawshank Redemption",
metadata={
"id": "1",
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"id": "2",
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"id": "3",
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"id": "4",
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"id": "5",
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"id": "6",
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"id": "7",
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
@pytest.fixture(scope="session")
def added_node_ids(vespa_app, nodes):
return vespa_app.add(nodes)
# Assume returned `inserted_ids` is a list of IDs that match the order of `nodes`
@pytest.mark.skipif(not docker_available, reason="Docker not available")
def test_query_text_search(vespa_app, added_node_ids):
query = VectorStoreQuery(
query_str="Inception", # Ensure the query matches the case used in the nodes
mode="text_search",
similarity_top_k=1,
)
result = vespa_app.query(query)
assert len(result.nodes) == 1
node_metadata = result.nodes[0].metadata
assert node_metadata["id"] == "3", "Expected Inception node"
@pytest.mark.skipif(not docker_available, reason="Docker not available")
def test_query_vector_search(vespa_app, added_node_ids):
query = VectorStoreQuery(
query_str="magic, wizardry",
mode="semantic_hybrid",
similarity_top_k=1,
)
result = vespa_app.query(query)
assert len(result.nodes) == 1, "Expected 1 result"
node_metadata = result.nodes[0].metadata
print(node_metadata)
assert node_metadata["id"] == "7", "Expected Harry Potter node"
@pytest.mark.skipif(not docker_available, reason="Docker not available")
def test_delete_node(vespa_app, added_node_ids):
# Testing the deletion of a node
vespa_app.delete(ref_doc_id=added_node_ids[1])
query = VectorStoreQuery(
query_str="Godfather",
mode=VectorStoreQueryMode.TEXT_SEARCH,
similarity_top_k=1,
)
result = vespa_app.query(query)
assert (
len(result.nodes) == 0
), f"Deleted node still present in the vector store: {result.nodes}"
@pytest.mark.skipif(not docker_available, reason="Docker not available")
@pytest.mark.asyncio
async def test_async_add_and_query(vespa_app, nodes):
# Testing async add and query
await asyncio.gather(*[vespa_app.async_add(nodes)])
query = VectorStoreQuery(query_str="Harry Potter", similarity_top_k=1)
result = await vespa_app.aquery(query)
assert len(result.nodes) == 1
assert result.nodes[0].node_id == "7"
|
import asyncio
import pytest
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
)
from vespa.application import ApplicationPackage
from llama_index.vector_stores.vespa import VespaVectorStore, hybrid_template
try:
# Should be installed as a pyvespa dependency
import docker
client = docker.from_env()
docker_available = client.ping()
except Exception:
docker_available = False
# Assuming Vespa services are mocked or local Vespa Docker is used
@pytest.fixture(scope="session")
def vespa_app():
app_package: ApplicationPackage = hybrid_template
try:
return VespaVectorStore(
application_package=app_package, deployment_target="local"
)
except RuntimeError as e:
pytest.skip(f"Could not create VespaVectorStore: {e}")
@pytest.fixture(scope="session")
def nodes() -> list:
return [
TextNode(
text="The Shawshank Redemption",
metadata={
"id": "1",
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"id": "2",
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"id": "3",
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"id": "4",
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"id": "5",
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"id": "6",
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"id": "7",
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
@pytest.fixture(scope="session")
def added_node_ids(vespa_app, nodes):
return vespa_app.add(nodes)
# Assume returned `inserted_ids` is a list of IDs that match the order of `nodes`
@pytest.mark.skipif(not docker_available, reason="Docker not available")
def test_query_text_search(vespa_app, added_node_ids):
query = VectorStoreQuery(
query_str="Inception", # Ensure the query matches the case used in the nodes
mode="text_search",
similarity_top_k=1,
)
result = vespa_app.query(query)
assert len(result.nodes) == 1
node_metadata = result.nodes[0].metadata
assert node_metadata["id"] == "3", "Expected Inception node"
@pytest.mark.skipif(not docker_available, reason="Docker not available")
def test_query_vector_search(vespa_app, added_node_ids):
query = VectorStoreQuery(
query_str="magic, wizardry",
mode="semantic_hybrid",
similarity_top_k=1,
)
result = vespa_app.query(query)
assert len(result.nodes) == 1, "Expected 1 result"
node_metadata = result.nodes[0].metadata
print(node_metadata)
assert node_metadata["id"] == "7", "Expected Harry Potter node"
@pytest.mark.skipif(not docker_available, reason="Docker not available")
def test_delete_node(vespa_app, added_node_ids):
# Testing the deletion of a node
vespa_app.delete(ref_doc_id=added_node_ids[1])
query = VectorStoreQuery(
query_str="Godfather",
mode=VectorStoreQueryMode.TEXT_SEARCH,
similarity_top_k=1,
)
result = vespa_app.query(query)
assert (
len(result.nodes) == 0
), f"Deleted node still present in the vector store: {result.nodes}"
@pytest.mark.skipif(not docker_available, reason="Docker not available")
@pytest.mark.asyncio()
async def test_async_add_and_query(vespa_app, nodes):
# Testing async add and query
await asyncio.gather(*[vespa_app.async_add(nodes)])
query = VectorStoreQuery(query_str="Harry Potter", similarity_top_k=1)
result = await vespa_app.aquery(query)
assert len(result.nodes) == 1
assert result.nodes[0].node_id == "7"
|
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
"HDemucs",
"hdemucs_low",
"hdemucs_medium",
"hdemucs_high",
]
|
from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
]
|
"""Loads RST files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRSTLoader(UnstructuredFileLoader):
"""Load `RST` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredRSTLoader
loader = UnstructuredRSTLoader(
"example.rst", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rst
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.7.5")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rst import partition_rst
return partition_rst(filename=self.file_path, **self.unstructured_kwargs)
|
"""Loads RST files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRSTLoader(UnstructuredFileLoader):
"""Load `RST` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredRSTLoader
loader = UnstructuredRSTLoader(
"example.rst", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rst
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.7.5")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rst import partition_rst
return partition_rst(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'get_loading_pipeline', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset'
]
|
import base64
import os
import pytest
import requests
from llama_index.core.llms import LLM
from llama_index.core.schema import ImageNode
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__]
assert LLM.__name__ in names_of_base_classes
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
@pytest.mark.asyncio
async def test_streaming_async():
response = requests.get(
"https://storage.googleapis.com/generativeai-downloads/data/scene.jpg",
headers={"User-agent": "Mozilla/5.0"},
)
image_str = base64.b64encode(response.content).decode("UTF-8")
node = ImageNode(image=image_str)
m = GeminiMultiModal()
streaming_handler = await m.astream_complete(
"Tell me what's in this image",
image_documents=[node],
)
async for chunk in streaming_handler:
assert chunk.delta
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_streaming():
response = requests.get(
"https://storage.googleapis.com/generativeai-downloads/data/scene.jpg",
headers={"User-agent": "Mozilla/5.0"},
)
image_str = base64.b64encode(response.content).decode("UTF-8")
node = ImageNode(image=image_str)
m = GeminiMultiModal()
streaming_handler = m.stream_complete(
"Tell me what's in this image",
image_documents=[node],
)
for chunk in streaming_handler:
assert chunk.delta
|
import base64
import os
import pytest
import requests
from llama_index.core.llms import LLM
from llama_index.core.schema import ImageNode
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__]
assert LLM.__name__ in names_of_base_classes
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
@pytest.mark.asyncio()
async def test_streaming_async():
response = requests.get(
"https://storage.googleapis.com/generativeai-downloads/data/scene.jpg",
headers={"User-agent": "Mozilla/5.0"},
)
image_str = base64.b64encode(response.content).decode("UTF-8")
node = ImageNode(image=image_str)
m = GeminiMultiModal()
streaming_handler = await m.astream_complete(
"Tell me what's in this image",
image_documents=[node],
)
async for chunk in streaming_handler:
assert chunk.delta
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_streaming():
response = requests.get(
"https://storage.googleapis.com/generativeai-downloads/data/scene.jpg",
headers={"User-agent": "Mozilla/5.0"},
)
image_str = base64.b64encode(response.content).decode("UTF-8")
node = ImageNode(image=image_str)
m = GeminiMultiModal()
streaming_handler = m.stream_complete(
"Tell me what's in this image",
image_documents=[node],
)
for chunk in streaming_handler:
assert chunk.delta
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../../examples/sentence_transformer/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SparkSQLToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SparkSQLToolkit",
]
|
def get_full_schema() -> dict:
"""Get full schema
:return: the full schema for Jina core as a dict.
"""
from jina import __version__
from jina.importer import IMPORTED
from jina.schemas.deployment import schema_deployment
from jina.schemas.executor import schema_all_executors
from jina.schemas.flow import schema_flow
from jina.schemas.gateway import schema_gateway
from jina.schemas.meta import schema_metas
definitions = {}
for s in [
schema_gateway,
schema_all_executors,
schema_flow,
schema_metas,
schema_deployment,
IMPORTED.schema_executors,
]:
definitions.update(s)
return {
'$id': f'https://api.jina.ai/schemas/{__version__}.json',
'$schema': 'http://json-schema.org/draft-07/schema#',
'description': 'The YAML schema of Jina objects (Flow, Executor).',
'type': 'object',
'oneOf': [{'$ref': '#/definitions/Jina::Flow'}]
+ [{"$ref": f"#/definitions/{k}"} for k in IMPORTED.schema_executors.keys()],
'definitions': definitions,
}
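# Hedged usage sketch (assumption, not part of the original module): the schema
# returned above is a plain dict, so it can be serialized to JSON, for example to
# feed a YAML/JSON validator for Flow definitions. The file name is a placeholder.
if __name__ == '__main__':
    import json

    with open('jina-schema.json', 'w') as fp:
        json.dump(get_full_schema(), fp, indent=2)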
|
def get_full_schema() -> dict:
"""Get full schema
:return: the full schema for Jina core as a dict.
"""
from jina import __version__
from jina.importer import IMPORTED
from jina.schemas.executor import schema_all_executors
from jina.schemas.flow import schema_flow
from jina.schemas.meta import schema_metas
from jina.schemas.deployment import schema_deployment
definitions = {}
for s in [
schema_all_executors,
schema_flow,
schema_metas,
schema_deployment,
IMPORTED.schema_executors,
]:
definitions.update(s)
return {
'$id': f'https://api.jina.ai/schemas/{__version__}.json',
'$schema': 'http://json-schema.org/draft-07/schema#',
'description': 'The YAML schema of Jina objects (Flow, Executor).',
'type': 'object',
'oneOf': [{'$ref': '#/definitions/Jina::Flow'}]
+ [{"$ref": f"#/definitions/{k}"} for k in IMPORTED.schema_executors.keys()],
'definitions': definitions,
}
|
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)["decoded_1"]
print(f"\nTop tokens {top_k} for each text:")
# The result is a list of sentence embeddings as numpy arrays
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
|
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
import numpy as np
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
"""
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
# The result is a list of sentence embeddings as numpy arrays
for i, sentence in enumerate(sentences):
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in zip(top_tokens, top_values)])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import Compose
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class PipelineSwitchHook(Hook):
"""Switch data pipeline at switch_epoch.
Args:
switch_epoch (int): switch pipeline at this epoch.
switch_pipeline (list[dict]): the pipeline to switch to.
"""
def __init__(self, switch_epoch, switch_pipeline):
self.switch_epoch = switch_epoch
self.switch_pipeline = switch_pipeline
self._restart_dataloader = False
self._has_switched = False
def before_train_epoch(self, runner):
"""switch pipeline."""
epoch = runner.epoch
train_loader = runner.train_dataloader
if epoch >= self.switch_epoch and not self._has_switched:
runner.logger.info('Switch pipeline now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.pipeline = Compose(self.switch_pipeline)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
self._has_switched = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
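# Hedged usage sketch (assumption, not part of the original hook): in an
# MMEngine-style config this hook is normally registered through `custom_hooks`;
# the epoch and the pipeline below are placeholder values.
example_custom_hooks = [
    dict(
        type='PipelineSwitchHook',
        switch_epoch=280,
        switch_pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(type='PackDetInputs'),
        ])
]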
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import Compose
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class PipelineSwitchHook(Hook):
"""Switch data pipeline at switch_epoch.
Args:
switch_epoch (int): switch pipeline at this epoch.
switch_pipeline (list[dict]): the pipeline to switch to.
"""
def __init__(self, switch_epoch, switch_pipeline):
self.switch_epoch = switch_epoch
self.switch_pipeline = switch_pipeline
self._restart_dataloader = False
def before_train_epoch(self, runner):
"""switch pipeline."""
epoch = runner.epoch
train_loader = runner.train_dataloader
if epoch == self.switch_epoch:
runner.logger.info('Switch pipeline now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.pipeline = Compose(self.switch_pipeline)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0002,
weight_decay=0.05,
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = './faster-rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResNeXt Bottleneck forward with plugins
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]
block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResNeXt Bottleneck forward with plugins
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]
block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
|
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_train.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_val.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v0.5_train.json',
img_prefix=data_root + 'train2017/')),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/'),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/'))
evaluation = dict(metric=['bbox', 'segm'])
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
import mmcv
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('--out-file', default=None, help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
# TODO: Support inference of image directory.
# build the model from a config file and a checkpoint file
model = init_detector(
args.config, args.checkpoint, palette=args.palette, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then passed to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
# test a single image
result = inference_detector(model, args.img)
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
'result',
img,
pred_sample=result,
show=args.out_file is None,
wait_time=0,
out_file=args.out_file,
pred_score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = model.dataset_meta
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
'result',
img,
pred_sample=result[0],
show=args.out_file is None,
wait_time=0,
out_file=args.out_file,
pred_score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
assert not args.async_test, 'async inference is not supported yet.'
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
import mmcv
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('--out-file', default=None, help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
# TODO: Support inference of image directory.
# build the model from a config file and a checkpoint file
model = init_detector(
args.config, args.checkpoint, palette=args.palette, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = model.dataset_meta
# test a single image
result = inference_detector(model, args.img)
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
'result',
img,
pred_sample=result,
show=True,
wait_time=0,
out_file=args.out_file,
pred_score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = model.dataset_meta
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
'result',
img,
pred_sample=result[0],
show=True,
wait_time=0,
out_file=args.out_file,
pred_score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
import json
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
default=None,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="The error on 4xx status codes")
server_error: object = SchemaField(description="The error on 5xx status codes")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
body = input_data.body
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=body if input_data.json_format else None,
data=body if not input_data.json_format else None,
)
result = response.json() if input_data.json_format else response.text
if response.status_code // 100 == 2:
yield "response", result
elif response.status_code // 100 == 4:
yield "client_error", result
elif response.status_code // 100 == 5:
yield "server_error", result
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
|
import json
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
default=None,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="The error on 4xx status codes")
server_error: object = SchemaField(description="The error on 5xx status codes")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
if isinstance(input_data.body, str):
input_data.body = json.loads(input_data.body)
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=input_data.body if input_data.json_format else None,
data=input_data.body if not input_data.json_format else None,
)
result = response.json() if input_data.json_format else response.text
if response.status_code // 100 == 2:
yield "response", result
elif response.status_code // 100 == 4:
yield "client_error", result
elif response.status_code // 100 == 5:
yield "server_error", result
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from .convfc_bbox_head import ConvFCBBoxHead
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
This inherits ``ConvFCBBoxHead`` with a modified forward() function, allowing us
to get the intermediate shared feature.
"""
def _forward_shared(self, x):
"""Forward function for shared part."""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x):
"""Forward function for classification and regression parts."""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(self, x, return_shared_feat=False):
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
out (tuple[Tensor]): contains ``cls_score`` and ``bbox_pred``;
if ``return_shared_feat`` is True, ``x_shared`` is appended to the
returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
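# --- Hedged usage sketch (assumed attributes/config; illustration only) ---
# Given an already-built head (e.g. from an SCNet config), ``return_shared_feat``
# additionally returns the flattened shared feature consumed by other SCNet branches.
def _demo_scnet_bbox_head(head: 'SCNetBBoxHead') -> None:
    import torch
    rois = torch.randn(8, head.in_channels, 7, 7)  # typical 7x7 RoIAlign output
    cls_score, bbox_pred = head(rois)
    cls_score, bbox_pred, x_shared = head(rois, return_shared_feat=True)
    assert x_shared.dim() == 2  # shared FC feature, flattened per RoI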
|
from mmdet.models.builder import HEADS
from .convfc_bbox_head import ConvFCBBoxHead
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
This inherits ``ConvFCBBoxHead`` with a modified forward() function, allowing us
to get the intermediate shared feature.
"""
def _forward_shared(self, x):
"""Forward function for shared part."""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x):
"""Forward function for classification and regression parts."""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(self, x, return_shared_feat=False):
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
out (tuple[Tensor]): contains ``cls_score`` and ``bbox_pred``;
if ``return_shared_feat`` is True, ``x_shared`` is appended to the
returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
|
import numpy as np
from docarray import BaseDocument
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(BaseDocument):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray import Document
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(Document):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
from collections.abc import Sequence
from typing import Any, Optional
from langchain_qdrant.sparse_embeddings import SparseEmbeddings, SparseVector
class FastEmbedSparse(SparseEmbeddings):
"""An interface for sparse embedding models to use with Qdrant."""
def __init__(
self,
model_name: str = "Qdrant/bm25",
batch_size: int = 256,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[Sequence[Any]] = None,
parallel: Optional[int] = None,
**kwargs: Any,
) -> None:
"""
Sparse encoder implementation using FastEmbed - https://qdrant.github.io/fastembed/
For a list of available models, see https://qdrant.github.io/fastembed/examples/Supported_Models/
Args:
model_name (str): The name of the model to use. Defaults to `"Qdrant/bm25"`.
batch_size (int): Batch size for encoding. Defaults to 256.
cache_dir (str, optional): The path to the model cache directory.\
Can also be set using the\
`FASTEMBED_CACHE_PATH` env variable.
threads (int, optional): The number of threads onnxruntime session can use.
providers (Sequence[Any], optional): List of ONNX execution providers.\
parallel (int, optional): If `>1`, data-parallel encoding will be used.\
Recommended for encoding of large datasets.\
If `0`, use all available cores.\
If `None`, don't use data-parallel processing,\
use default onnxruntime threading instead.\
Defaults to None.
kwargs: Additional options to pass to fastembed.SparseTextEmbedding
Raises:
ValueError: If the model_name is not supported in SparseTextEmbedding.
"""
try:
from fastembed import SparseTextEmbedding # type: ignore
except ImportError:
raise ValueError(
"The 'fastembed' package is not installed. "
"Please install it with "
"`pip install fastembed` or `pip install fastembed-gpu`."
)
self._batch_size = batch_size
self._parallel = parallel
self._model = SparseTextEmbedding(
model_name=model_name,
cache_dir=cache_dir,
threads=threads,
providers=providers,
**kwargs,
)
def embed_documents(self, texts: list[str]) -> list[SparseVector]:
results = self._model.embed(
texts, batch_size=self._batch_size, parallel=self._parallel
)
return [
SparseVector(indices=result.indices.tolist(), values=result.values.tolist())
for result in results
]
def embed_query(self, text: str) -> SparseVector:
result = next(self._model.query_embed(text))
return SparseVector(
indices=result.indices.tolist(), values=result.values.tolist()
)
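# --- Hedged usage sketch (requires the optional ``fastembed`` package) ---
# Demonstrates the SparseVector structure returned by the wrapper; the default
# model is the class default ("Qdrant/bm25") and is downloaded on first use.
if __name__ == "__main__":
    sparse = FastEmbedSparse()
    doc_vectors = sparse.embed_documents(["hello world", "sparse retrieval"])
    query_vector = sparse.embed_query("hello")
    print(len(doc_vectors), len(query_vector.indices), len(query_vector.values))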
|
from typing import Any, List, Optional, Sequence
from langchain_qdrant.sparse_embeddings import SparseEmbeddings, SparseVector
class FastEmbedSparse(SparseEmbeddings):
"""An interface for sparse embedding models to use with Qdrant."""
def __init__(
self,
model_name: str = "Qdrant/bm25",
batch_size: int = 256,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[Sequence[Any]] = None,
parallel: Optional[int] = None,
**kwargs: Any,
) -> None:
"""
Sparse encoder implementation using FastEmbed - https://qdrant.github.io/fastembed/
For a list of available models, see https://qdrant.github.io/fastembed/examples/Supported_Models/
Args:
model_name (str): The name of the model to use. Defaults to `"Qdrant/bm25"`.
batch_size (int): Batch size for encoding. Defaults to 256.
cache_dir (str, optional): The path to the model cache directory.\
Can also be set using the\
`FASTEMBED_CACHE_PATH` env variable.
threads (int, optional): The number of threads onnxruntime session can use.
providers (Sequence[Any], optional): List of ONNX execution providers.\
parallel (int, optional): If `>1`, data-parallel encoding will be used.\
Recommended for encoding of large datasets.\
If `0`, use all available cores.\
If `None`, don't use data-parallel processing,\
use default onnxruntime threading instead.\
Defaults to None.
kwargs: Additional options to pass to fastembed.SparseTextEmbedding
Raises:
ValueError: If the model_name is not supported in SparseTextEmbedding.
"""
try:
from fastembed import SparseTextEmbedding # type: ignore
except ImportError:
raise ValueError(
"The 'fastembed' package is not installed. "
"Please install it with "
"`pip install fastembed` or `pip install fastembed-gpu`."
)
self._batch_size = batch_size
self._parallel = parallel
self._model = SparseTextEmbedding(
model_name=model_name,
cache_dir=cache_dir,
threads=threads,
providers=providers,
**kwargs,
)
def embed_documents(self, texts: List[str]) -> List[SparseVector]:
results = self._model.embed(
texts, batch_size=self._batch_size, parallel=self._parallel
)
return [
SparseVector(indices=result.indices.tolist(), values=result.values.tolist())
for result in results
]
def embed_query(self, text: str) -> SparseVector:
result = next(self._model.query_embed(text))
return SparseVector(
indices=result.indices.tolist(), values=result.values.tolist()
)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDoc):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`AudioDoc.url`), an AudioTensor
(`AudioDoc.tensor`), and an AnyEmbedding (`AudioDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import AudioDoc
# use it directly
audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(AudioDoc):
name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = TextDoc(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
audio: AudioDoc
text: TextDoc
mmdoc = MultiModalDoc(
audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`AudioDoc.url`), an AudioTensor
(`AudioDoc.tensor`), and an AnyEmbedding (`AudioDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import AudioDoc
# use it directly
audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(AudioDoc):
name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = TextDoc(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
audio: AudioDoc
text: TextDoc
mmdoc = MultiModalDoc(
audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.transforms
return torchvision.transforms
class ClassificationPresetTrain:
# Note: this transform assumes that the input to forward() is always a PIL
# image, regardless of the backend parameter. We may change that in the
# future though, if we change the output type from the dataset.
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
use_v2=False,
):
T = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms.append(T.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
transforms.append(T.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
transforms.append(T.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
transforms.append(T.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
transforms.append(T.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = T.AutoAugmentPolicy(auto_augment_policy)
transforms.append(T.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
transforms.append(T.PILToTensor())
transforms.extend(
[
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
transforms.append(T.RandomErasing(p=random_erase_prob))
self.transforms = T.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
use_v2=False,
):
T = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms += [
T.Resize(resize_size, interpolation=interpolation, antialias=True),
T.CenterCrop(crop_size),
]
if backend == "pil":
transforms.append(T.PILToTensor())
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
self.transforms = T.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
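# --- Hedged usage sketch (illustration only) ---
# Builds the presets with typical ImageNet-style values and applies them to a
# PIL image; the crop/resize sizes below are assumptions, not values from this file.
if __name__ == "__main__":
    from PIL import Image
    train_tf = ClassificationPresetTrain(crop_size=176, auto_augment_policy="ta_wide")
    eval_tf = ClassificationPresetEval(crop_size=224, resize_size=232)
    img = Image.new("RGB", (256, 256))
    print(train_tf(img).shape)  # torch.Size([3, 176, 176])
    print(eval_tf(img).shape)   # torch.Size([3, 224, 224])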
|
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.transforms
return torchvision.transforms
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
use_v2=False,
):
module = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(module.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms.append(module.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
transforms.append(module.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
transforms.append(module.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
transforms.append(module.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
transforms.append(module.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = module.AutoAugmentPolicy(auto_augment_policy)
transforms.append(module.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
transforms.append(module.PILToTensor())
transforms.extend(
[
module.ConvertImageDtype(torch.float),
module.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
transforms.append(module.RandomErasing(p=random_erase_prob))
self.transforms = module.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
use_v2=False,
):
module = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(module.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms += [
module.Resize(resize_size, interpolation=interpolation, antialias=True),
module.CenterCrop(crop_size),
]
if backend == "pil":
transforms.append(module.PILToTensor())
transforms += [
module.ConvertImageDtype(torch.float),
module.Normalize(mean=mean, std=std),
]
self.transforms = module.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
try:
import sklearn
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
except ImportError:
sklearn = None
class BaseEstimator:
pass
class TransformerMixin:
pass
def assert_sklearn_installed(symbol_name):
if sklearn is None:
raise ImportError(
f"{symbol_name} requires `scikit-learn` to be installed. "
"Run `pip install scikit-learn` to install it."
)
def _check_model(model):
"""Check whether the model need sto be compiled."""
# raise if the user gave us an un-compiled model
if not model.compiled or not model.loss or not model.optimizer:
raise RuntimeError(
"Given model needs to be compiled, and have a loss and an "
"optimizer."
)
class TargetReshaper(TransformerMixin, BaseEstimator):
"""Convert 1D targets to 2D and back.
For use in pipelines with transformers that only accept
2D inputs, like OneHotEncoder and OrdinalEncoder.
Attributes:
ndim_ : int
Dimensions of y that the transformer was trained on.
"""
def fit(self, y):
"""Fit the transformer to a target y.
Returns:
TargetReshaper
A reference to the current instance of TargetReshaper.
"""
self.ndim_ = y.ndim
return self
def transform(self, y):
"""Makes 1D y 2D.
Args:
y : np.ndarray
Target y to be transformed.
Returns:
np.ndarray
A numpy array, of dimension at least 2.
"""
if y.ndim == 1:
return y.reshape(-1, 1)
return y
def inverse_transform(self, y):
"""Revert the transformation of transform.
Args:
y: np.ndarray
Transformed numpy array.
Returns:
np.ndarray
If the transformer was fit to a 1D numpy array,
and a 2D numpy array with a singleton second dimension
is passed, it will be squeezed back to 1D. Otherwise, it
will be left untouched.
"""
sklearn.base.check_is_fitted(self)
xp, _ = sklearn.utils._array_api.get_namespace(y)
if self.ndim_ == 1 and y.ndim == 2:
return xp.squeeze(y, axis=1)
return y
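# --- Hedged usage sketch (requires scikit-learn to be installed) ---
# Demonstrates the 1D -> 2D round trip that 2D-only encoders such as
# OneHotEncoder/OrdinalEncoder need.
if __name__ == "__main__":
    import numpy as np
    y = np.array([0, 1, 2])
    reshaper = TargetReshaper().fit(y)
    y_2d = reshaper.transform(y)             # shape (3, 1)
    y_1d = reshaper.inverse_transform(y_2d)  # squeezed back to shape (3,)
    print(y_2d.shape, y_1d.shape)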
|
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.base import check_is_fitted
from sklearn.utils._array_api import get_namespace
def _check_model(model):
"""Check whether the model need sto be compiled."""
# raise if the user gave us an un-compiled model
if not model.compiled or not model.loss or not model.optimizer:
raise RuntimeError(
"Given model needs to be compiled, and have a loss and an "
"optimizer."
)
class TargetReshaper(TransformerMixin, BaseEstimator):
"""Convert 1D targets to 2D and back.
For use in pipelines with transformers that only accept
2D inputs, like OneHotEncoder and OrdinalEncoder.
Attributes:
ndim_ : int
Dimensions of y that the transformer was trained on.
"""
def fit(self, y):
"""Fit the transformer to a target y.
Returns:
TargetReshaper
A reference to the current instance of TargetReshaper.
"""
self.ndim_ = y.ndim
return self
def transform(self, y):
"""Makes 1D y 2D.
Args:
y : np.ndarray
Target y to be transformed.
Returns:
np.ndarray
A numpy array, of dimension at least 2.
"""
if y.ndim == 1:
return y.reshape(-1, 1)
return y
def inverse_transform(self, y):
"""Revert the transformation of transform.
Args:
y: np.ndarray
Transformed numpy array.
Returns:
np.ndarray
If the transformer was fit to a 1D numpy array,
and a 2D numpy array with a singleton second dimension
is passed, it will be squeezed back to 1D. Otherwise, it
will be left untouched.
"""
check_is_fitted(self)
xp, _ = get_namespace(y)
if self.ndim_ == 1 and y.ndim == 2:
return xp.squeeze(y, axis=1)
return y
|
"""O365 tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
O365CreateDraftMessage,
O365SearchEmails,
O365SearchEvents,
O365SendEvent,
O365SendMessage,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"O365SearchEmails": "langchain_community.tools",
"O365SearchEvents": "langchain_community.tools",
"O365CreateDraftMessage": "langchain_community.tools",
"O365SendMessage": "langchain_community.tools",
"O365SendEvent": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
]
|
"""O365 tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
O365CreateDraftMessage,
O365SearchEmails,
O365SearchEvents,
O365SendEvent,
O365SendMessage,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"O365SearchEmails": "langchain_community.tools",
"O365SearchEvents": "langchain_community.tools",
"O365CreateDraftMessage": "langchain_community.tools",
"O365SendMessage": "langchain_community.tools",
"O365SendEvent": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365SearchEmails",
"O365SearchEvents",
"O365CreateDraftMessage",
"O365SendMessage",
"O365SendEvent",
]
|
"""Vectara RAG Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
from llama_index.indices.managed.vectara import VectaraIndex
class VectaraRagPack(BaseLlamaPack):
"""Vectara RAG pack."""
def __init__(
self,
nodes: Optional[List[TextNode]] = None,
similarity_top_k: int = 5,
**kwargs: Any,
):
self._index = VectaraIndex(nodes)
vectara_kwargs = kwargs.get("vectara_kwargs", {})
if "summary_enabled" not in vectara_kwargs:
vectara_kwargs["summary_enabled"] = True
self._query_engine = self._index.as_query_engine(
similarity_top_k=similarity_top_k,
**kwargs,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"index": self._index,
"query_engine": self._query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self._query_engine.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self._query_engine.query(*args, **kwargs)
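# --- Hedged usage sketch (assumes Vectara credentials are configured) ---
# VectaraIndex typically reads its customer id / corpus id / API key from
# environment variables; the node text below is a placeholder.
if __name__ == "__main__":
    nodes = [TextNode(text="Vectara is a managed retrieval platform.")]
    pack = VectaraRagPack(nodes=nodes, similarity_top_k=3)
    print(pack.run("What is Vectara?"))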
|
"""Vectara RAG Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
from llama_index.indices.managed.vectara import VectaraIndex
class VectaraRagPack(BaseLlamaPack):
"""Vectara RAG pack."""
def __init__(
self,
nodes: Optional[List[TextNode]] = None,
similarity_top_k: int = 5,
**kwargs: Any,
):
self._index = VectaraIndex(nodes)
vectara_kwargs = kwargs.get("vectara_kwargs", {})
if "summary_enabled" not in vectara_kwargs:
vectara_kwargs["summary_enabled"] = True
self._query_engine = self._index.as_query_engine(
similarity_top_k=similarity_top_k,
**kwargs,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"index": self._index,
"query_engine": self._query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self._query_engine.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self._query_engine.query(*args, **kwargs)
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_2d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4)
outputs = layers.ZeroPadding2D(
padding=((1, 2), (3, 4)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, :], inputs)
@parameterized.product(
(
{"padding": ((2, 2), (2, 2))}, # 2 tuples
{"padding": (2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_2d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4)
outputs = layers.ZeroPadding2D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, :], inputs)
def test_zero_padding_2d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 2, None, 4))
else:
input_layer = layers.Input(batch_shape=(1, 4, 2, None))
padded = layers.ZeroPadding2D(((1, 2), (3, 4)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(padded.shape, (1, 5, None, 4))
else:
self.assertEqual(padded.shape, (1, 4, 5, None))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2, 3)},
{"padding": "1"},
{"padding": ((1, 2), (3, 4, 5))},
{"padding": ((1, 2), (3, -4))},
{"padding": ((1, 2), "3")},
)
def test_zero_padding_2d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_2d_get_config(self, data_format):
layer = layers.ZeroPadding2D(padding=(1, 2), data_format=data_format)
expected_config = {
"data_format": data_format,
"dtype": dtype_policies.serialize(layer.dtype_policy),
"name": layer.name,
"padding": ((1, 1), (2, 2)),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_2d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4)
outputs = layers.ZeroPadding2D(
padding=((1, 2), (3, 4)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, :], inputs)
@parameterized.product(
(
{"padding": ((2, 2), (2, 2))}, # 2 tuples
{"padding": (2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_2d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4)
outputs = layers.ZeroPadding2D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, :], inputs)
def test_zero_padding_2d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 2, None, 4))
else:
input_layer = layers.Input(batch_shape=(1, 4, 2, None))
padded = layers.ZeroPadding2D(((1, 2), (3, 4)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(padded.shape, (1, 5, None, 4))
else:
self.assertEqual(padded.shape, (1, 4, 5, None))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2, 3)},
{"padding": "1"},
{"padding": ((1, 2), (3, 4, 5))},
{"padding": ((1, 2), (3, -4))},
{"padding": ((1, 2), "3")},
)
def test_zero_padding_2d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_2d_get_config(self, data_format):
layer = layers.ZeroPadding2D(padding=(1, 2), data_format=data_format)
expected_config = {
"data_format": data_format,
"dtype": layer.dtype_policy.name,
"name": layer.name,
"padding": ((1, 1), (2, 2)),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
from sentence_transformers import SentenceTransformer, util, models
from PIL import ImageFile, Image
import numpy as np
import requests
###########
image = Image.open('two_dogs_in_snow.jpg')
from transformers import CLIPProcessor, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
#vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
#image_embeds = model.visual_projection(vision_outputs[1])
#print(image_embeds.shape)
#exit()
#Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save('tmp-clip-model')
model = SentenceTransformer('tmp-clip-model')
#Encode an image:
img_emb = model.encode(Image.open('two_dogs_in_snow.jpg'))
#Encode text descriptions
text_emb = model.encode(['Two dogs in the snow', 'A cat on a table', 'A picture of London at night'])
#Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
"""Test file reader."""
import json
import sys
from tempfile import TemporaryDirectory
import pytest
from llama_index.core.readers.json import JSONReader
def test_basic() -> None:
"""Test JSON reader in basic mode."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test1.json"
with open(file_name, "w") as f:
f.write('{"test1": "test1"}')
reader = JSONReader()
data = reader.load_data(file_name)
assert len(data) == 1
assert isinstance(data[0].get_content(), str)
assert data[0].get_content().index("test1") is not None
def test_levels_back0() -> None:
"""Test JSON reader using the levels_back function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test2.json"
with open(file_name, "w") as f:
f.write('{ "a": { "b": ["c"] } }')
reader1 = JSONReader(levels_back=0)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == "a b c"
reader2 = JSONReader(levels_back=1)
data2 = reader2.load_data(file_name)
assert data2[0].get_content() == "b c"
def test_collapse_length() -> None:
"""Test JSON reader using the collapse_length function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test3.json"
with open(file_name, "w") as f:
f.write('{ "a": { "b": "c" } }')
reader1 = JSONReader(levels_back=0, collapse_length=100)
data1 = reader1.load_data(file_name)
assert isinstance(data1[0].get_content(), str)
assert data1[0].get_content().index('"a":') is not None
reader2 = JSONReader(levels_back=0, collapse_length=10)
data2 = reader2.load_data(file_name)
assert isinstance(data2[0].get_content(), str)
assert data2[0].get_content().index("a ") is not None
def test_jsonl() -> None:
"""Test JSON reader using the is_jsonl function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test4.json"
with open(file_name, "w") as f:
f.write('{"test1": "test1"}\n{"test2": "test2"}\n{"test3": "test3"}\n')
reader = JSONReader(is_jsonl=True)
data = reader.load_data(file_name)
assert len(data) == 3
assert isinstance(data[0].get_content(), str)
assert data[0].get_content().index("test1") is not None
assert isinstance(data[1].get_content(), str)
assert data[1].get_content().index("test2") is not None
assert isinstance(data[2].get_content(), str)
assert data[2].get_content().index("test3") is not None
def test_clean_json() -> None:
"""Test JSON reader using the clean_json function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test5.json"
with open(file_name, "w") as f:
f.write('{ "a": { "b": "c" } }')
# If levels back is set clean_json is ignored
reader1 = JSONReader(levels_back=0, clean_json=False)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == "a b c"
# If clean_json is false the full json should be contained in a document
reader1 = JSONReader(clean_json=False)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == '{"a": {"b": "c"}}'
# If clean_json is True, lines containing only JSON structure are stripped from the document
reader1 = JSONReader(clean_json=True)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == '"a": {\n"b": "c"'
def test_max_recursion_attack(tmp_path):
original_limit = sys.getrecursionlimit()
try:
nested_dict = {}
current_level = nested_dict
sys.setrecursionlimit(5000)
for i in range(1, 2001): # Create 2000 levels of nesting
if i == 2000:
current_level[f"level{i}"] = "final_value"
else:
current_level[f"level{i}"] = {}
current_level = current_level[f"level{i}"]
file_name = tmp_path / "test_nested.json"
with open(file_name, "w") as f:
f.write(json.dumps(nested_dict))
# Force a recursion error
sys.setrecursionlimit(500)
reader = JSONReader(levels_back=1)
with pytest.warns(UserWarning):
data = reader.load_data(file_name)
assert data == []
finally:
sys.setrecursionlimit(original_limit)
|
"""Test file reader."""
from tempfile import TemporaryDirectory
from llama_index.core.readers.json import JSONReader
def test_basic() -> None:
"""Test JSON reader in basic mode."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test1.json"
with open(file_name, "w") as f:
f.write('{"test1": "test1"}')
reader = JSONReader()
data = reader.load_data(file_name)
assert len(data) == 1
assert isinstance(data[0].get_content(), str)
assert data[0].get_content().index("test1") is not None
def test_levels_back0() -> None:
"""Test JSON reader using the levels_back function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test2.json"
with open(file_name, "w") as f:
f.write('{ "a": { "b": ["c"] } }')
reader1 = JSONReader(levels_back=0)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == "a b c"
reader2 = JSONReader(levels_back=1)
data2 = reader2.load_data(file_name)
assert data2[0].get_content() == "b c"
def test_collapse_length() -> None:
"""Test JSON reader using the collapse_length function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test3.json"
with open(file_name, "w") as f:
f.write('{ "a": { "b": "c" } }')
reader1 = JSONReader(levels_back=0, collapse_length=100)
data1 = reader1.load_data(file_name)
assert isinstance(data1[0].get_content(), str)
assert data1[0].get_content().index('"a":') is not None
reader2 = JSONReader(levels_back=0, collapse_length=10)
data2 = reader2.load_data(file_name)
assert isinstance(data2[0].get_content(), str)
assert data2[0].get_content().index("a ") is not None
def test_jsonl() -> None:
"""Test JSON reader using the is_jsonl function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test4.json"
with open(file_name, "w") as f:
f.write('{"test1": "test1"}\n{"test2": "test2"}\n{"test3": "test3"}\n')
reader = JSONReader(is_jsonl=True)
data = reader.load_data(file_name)
assert len(data) == 3
assert isinstance(data[0].get_content(), str)
assert data[0].get_content().index("test1") is not None
assert isinstance(data[1].get_content(), str)
assert data[1].get_content().index("test2") is not None
assert isinstance(data[2].get_content(), str)
assert data[2].get_content().index("test3") is not None
def test_clean_json() -> None:
"""Test JSON reader using the clean_json function."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test5.json"
with open(file_name, "w") as f:
f.write('{ "a": { "b": "c" } }')
# If levels back is set clean_json is ignored
reader1 = JSONReader(levels_back=0, clean_json=False)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == "a b c"
# If clean_json is false the full json should be contained in a document
reader1 = JSONReader(clean_json=False)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == '{"a": {"b": "c"}}'
        # If clean_json is True, lines containing only JSON structure are stripped from the document
reader1 = JSONReader(clean_json=True)
data1 = reader1.load_data(file_name)
assert data1[0].get_content() == '"a": {\n"b": "c"'
|
from docarray.base_document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
|
from docarray.document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
|
_base_ = './mask-rcnn_hrnetv2p-w32-1x_coco.py'
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
|
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
|
"""LLM Compiler Output Parser."""
import re
from typing import Any, Dict, List, Sequence
from llama_index.core.tools import BaseTool
from llama_index.core.types import BaseOutputParser
from .schema import JoinerOutput, LLMCompilerParseResult
from .utils import get_graph_dict
THOUGHT_PATTERN = r"Thought: ([^\n]*)"
ACTION_PATTERN = r"\n*(\d+)\. (\w+)\((.*)\)(\s*#\w+\n)?"
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
END_OF_PLAN = "<END_OF_PLAN>"
JOINER_REPLAN = "Replan"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
class LLMCompilerPlanParser(BaseOutputParser):
"""
LLM Compiler plan output parser.
Directly adapted from source code: https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/output_parser.py.
"""
def __init__(self, tools: Sequence[BaseTool]):
"""Init params."""
self.tools = tools
def parse(self, text: str) -> Dict[int, Any]:
# 1. search("Ronaldo number of kids") -> 1, "search", '"Ronaldo number of kids"'
# pattern = r"(\d+)\. (\w+)\(([^)]+)\)"
pattern = rf"(?:{THOUGHT_PATTERN}\n)?{ACTION_PATTERN}"
matches = re.findall(pattern, text)
# convert matches to a list of LLMCompilerParseResult
results: List[LLMCompilerParseResult] = []
for match in matches:
thought, idx, tool_name, args, _ = match
idx = int(idx)
results.append(
LLMCompilerParseResult(
thought=thought, idx=idx, tool_name=tool_name, args=args
)
)
# get graph dict
return get_graph_dict(results, self.tools)
### Helper functions
class LLMCompilerJoinerParser(BaseOutputParser):
"""
LLM Compiler output parser for the join step.
Adapted from _parse_joiner_output in
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
"""
def parse(self, text: str) -> JoinerOutput:
"""Parse."""
thought, answer, is_replan = "", "", False # default values
raw_answers = text.split("\n")
for answer in raw_answers:
if answer.startswith("Action:"):
answer = answer[answer.find("(") + 1 : answer.find(")")]
is_replan = JOINER_REPLAN in answer
elif answer.startswith("Thought:"):
thought = answer.split("Thought:")[1].strip()
return JoinerOutput(thought=thought, answer=answer, is_replan=is_replan)
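# --- Usage sketch (illustrative only; not part of the original parser) ---
# A minimal, hedged demonstration of the plan-line format that the combined
# THOUGHT_PATTERN/ACTION_PATTERN regex extracts before the results are handed
# to get_graph_dict. The tool name "search" and the plan text are made up.
if __name__ == "__main__":
    demo_plan = (
        "Thought: I need to look this up.\n"
        '1. search("Ronaldo number of kids")\n'
    )
    demo_pattern = rf"(?:{THOUGHT_PATTERN}\n)?{ACTION_PATTERN}"
    for thought, idx, tool_name, args, _ in re.findall(demo_pattern, demo_plan):
        # Expected: thought='I need to look this up.', idx='1',
        #           tool_name='search', args='"Ronaldo number of kids"'
        print(thought, idx, tool_name, args)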
|
"""LLM Compiler Output Parser."""
import re
from typing import Any, Dict, List, Sequence
from llama_index.core.tools import BaseTool
from llama_index.core.types import BaseOutputParser
from .schema import JoinerOutput, LLMCompilerParseResult
from .utils import get_graph_dict
THOUGHT_PATTERN = r"Thought: ([^\n]*)"
ACTION_PATTERN = r"\n*(\d+)\. (\w+)\((.*)\)(\s*#\w+\n)?"
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
END_OF_PLAN = "<END_OF_PLAN>"
JOINER_REPLAN = "Replan"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
class LLMCompilerPlanParser(BaseOutputParser):
"""LLM Compiler plan output parser.
Directly adapted from source code: https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/output_parser.py.
"""
def __init__(self, tools: Sequence[BaseTool]):
"""Init params."""
self.tools = tools
def parse(self, text: str) -> Dict[int, Any]:
# 1. search("Ronaldo number of kids") -> 1, "search", '"Ronaldo number of kids"'
# pattern = r"(\d+)\. (\w+)\(([^)]+)\)"
pattern = rf"(?:{THOUGHT_PATTERN}\n)?{ACTION_PATTERN}"
matches = re.findall(pattern, text)
# convert matches to a list of LLMCompilerParseResult
results: List[LLMCompilerParseResult] = []
for match in matches:
thought, idx, tool_name, args, _ = match
idx = int(idx)
results.append(
LLMCompilerParseResult(
thought=thought, idx=idx, tool_name=tool_name, args=args
)
)
# get graph dict
return get_graph_dict(results, self.tools)
### Helper functions
class LLMCompilerJoinerParser(BaseOutputParser):
"""LLM Compiler output parser for the join step.
Adapted from _parse_joiner_output in
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
"""
def parse(self, text: str) -> JoinerOutput:
"""Parse."""
thought, answer, is_replan = "", "", False # default values
raw_answers = text.split("\n")
for answer in raw_answers:
if answer.startswith("Action:"):
answer = answer[answer.find("(") + 1 : answer.find(")")]
is_replan = JOINER_REPLAN in answer
elif answer.startswith("Thought:"):
thought = answer.split("Thought:")[1].strip()
return JoinerOutput(thought=thought, answer=answer, is_replan=is_replan)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow
from ...torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegmenter)
with f:
resp = f.post(on='/test', inputs=Document(), return_results=True)
assert resp is not None
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow, Document
from ...torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegmenter)
with f:
resp = f.post(on='/test', inputs=Document(), return_results=True)
assert resp is not None
|
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
from diffusers.utils.testing_utils import (
floats_tensor,
require_peft_backend,
skip_mps,
)
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
@skip_mps
class MochiLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = MochiPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"patch_size": 2,
"num_attention_heads": 2,
"attention_head_dim": 8,
"num_layers": 2,
"pooled_projection_dim": 16,
"in_channels": 12,
"out_channels": None,
"qk_norm": "rms_norm",
"text_embed_dim": 32,
"time_embed_dim": 4,
"activation_fn": "swiglu",
"max_sequence_length": 16,
}
transformer_cls = MochiTransformer3DModel
vae_kwargs = {
"latent_channels": 12,
"out_channels": 3,
"encoder_block_out_channels": (32, 32, 32, 32),
"decoder_block_out_channels": (32, 32, 32, 32),
"layers_per_block": (1, 1, 1, 1, 1),
}
vae_cls = AutoencoderKLMochi
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
text_encoder_target_modules = ["q", "k", "v", "o"]
@property
def output_shape(self):
return (1, 7, 16, 16, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
num_frames = 7
num_latent_frames = 3
sizes = (2, 2)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "dance monkey",
"num_frames": num_frames,
"num_inference_steps": 4,
"guidance_scale": 6.0,
# Cannot reduce because convolution kernel becomes bigger than sample
"height": 16,
"width": 16,
"max_sequence_length": sequence_length,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
@unittest.skip("Not supported in Mochi.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in Mochi.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in Mochi.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_save_load(self):
pass
@unittest.skip("Not supported in CogVideoX.")
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
pass
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
from diffusers.utils.testing_utils import (
floats_tensor,
require_peft_backend,
skip_mps,
)
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
@skip_mps
class MochiLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = MochiPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"patch_size": 2,
"num_attention_heads": 2,
"attention_head_dim": 8,
"num_layers": 2,
"pooled_projection_dim": 16,
"in_channels": 12,
"out_channels": None,
"qk_norm": "rms_norm",
"text_embed_dim": 32,
"time_embed_dim": 4,
"activation_fn": "swiglu",
"max_sequence_length": 16,
}
transformer_cls = MochiTransformer3DModel
vae_kwargs = {
"latent_channels": 12,
"out_channels": 3,
"encoder_block_out_channels": (32, 32, 32, 32),
"decoder_block_out_channels": (32, 32, 32, 32),
"layers_per_block": (1, 1, 1, 1, 1),
}
vae_cls = AutoencoderKLMochi
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
text_encoder_target_modules = ["q", "k", "v", "o"]
@property
def output_shape(self):
return (1, 7, 16, 16, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
num_frames = 7
num_latent_frames = 3
sizes = (2, 2)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "dance monkey",
"num_frames": num_frames,
"num_inference_steps": 4,
"guidance_scale": 6.0,
# Cannot reduce because convolution kernel becomes bigger than sample
"height": 16,
"width": 16,
"max_sequence_length": sequence_length,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
@unittest.skip("Not supported in Mochi.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in Mochi.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in Mochi.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_save_load(self):
pass
@unittest.skip("Not supported in CogVideoX.")
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
pass
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from docarray import BaseDoc, DocList
from docarray.utils._internal.pydantic import is_pydantic_v2
@pytest.mark.skipif(
is_pydantic_v2,
reason="Subscripted generics cannot be used with class and instance checks",
)
def test_instance_and_equivalence():
class MyDoc(BaseDoc):
text: str
docs = DocList[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocList[MyDoc], DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, DocList[MyDoc])
@pytest.mark.skipif(
is_pydantic_v2,
reason="Subscripted generics cannot be used with class and instance checks",
)
def test_subclassing():
class MyDoc(BaseDoc):
text: str
class MyDocList(DocList[MyDoc]):
pass
docs = MyDocList([MyDoc(text='hello')])
assert issubclass(MyDocList, DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, MyDocList)
assert isinstance(docs, DocList[MyDoc])
assert issubclass(MyDoc, BaseDoc)
assert not issubclass(DocList[MyDoc], DocList[BaseDoc])
assert not issubclass(MyDocList, DocList[BaseDoc])
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray import BaseDoc, DocList
def test_instance_and_equivalence():
class MyDoc(BaseDoc):
text: str
docs = DocList[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocList[MyDoc], DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, DocList[MyDoc])
def test_subclassing():
class MyDoc(BaseDoc):
text: str
class MyDocList(DocList[MyDoc]):
pass
docs = MyDocList([MyDoc(text='hello')])
assert issubclass(MyDocList, DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, MyDocList)
assert isinstance(docs, DocList[MyDoc])
assert issubclass(MyDoc, BaseDoc)
assert not issubclass(DocList[MyDoc], DocList[BaseDoc])
assert not issubclass(MyDocList, DocList[BaseDoc])
|
from typing import Any, Dict
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.llms.openai import BaseOpenAI
from langchain_community.utils.openai import is_openai_v1
DEFAULT_BASE_URL = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "codellama-7b-instruct"
class OctoAIEndpoint(BaseOpenAI):
"""OctoAI LLM Endpoints - OpenAI compatible.
OctoAIEndpoint is a class to interact with OctoAI Compute Service large
language model endpoints.
To use, you should have the environment variable ``OCTOAI_API_TOKEN`` set
with your API token, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
llm = OctoAIEndpoint(
model="llama-2-13b-chat-fp16",
max_tokens=200,
presence_penalty=0,
temperature=0.1,
top_p=0.9,
)
"""
"""Key word arguments to pass to the model."""
octoai_api_base: str = Field(default=DEFAULT_BASE_URL)
octoai_api_token: SecretStr = Field(default=SecretStr(""))
model_name: str = Field(default=DEFAULT_MODEL)
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params: Dict[str, Any] = {
"model": self.model_name,
**self._default_params,
}
if not is_openai_v1():
params.update(
{
"api_key": self.octoai_api_token.get_secret_value(),
"api_base": self.octoai_api_base,
}
)
return {**params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "octoai_endpoint"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["octoai_api_base"] = get_from_dict_or_env(
values,
"octoai_api_base",
"OCTOAI_API_BASE",
default=DEFAULT_BASE_URL,
)
values["octoai_api_token"] = convert_to_secret_str(
get_from_dict_or_env(values, "octoai_api_token", "OCTOAI_API_TOKEN")
)
values["model_name"] = get_from_dict_or_env(
values,
"model_name",
"MODEL_NAME",
default=DEFAULT_MODEL,
)
try:
import openai
if is_openai_v1():
client_params = {
"api_key": values["octoai_api_token"].get_secret_value(),
"base_url": values["octoai_api_base"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).completions
else:
values["openai_api_base"] = values["octoai_api_base"]
values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if "endpoint_url" in values["model_kwargs"]:
raise ValueError(
"`endpoint_url` was deprecated, please use `octoai_api_base`."
)
return values
|
from typing import Any, Dict
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.llms.openai import BaseOpenAI
from langchain_community.utils.openai import is_openai_v1
DEFAULT_BASE_URL = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "codellama-7b-instruct"
class OctoAIEndpoint(BaseOpenAI): # type: ignore[override]
"""OctoAI LLM Endpoints - OpenAI compatible.
OctoAIEndpoint is a class to interact with OctoAI Compute Service large
language model endpoints.
To use, you should have the environment variable ``OCTOAI_API_TOKEN`` set
with your API token, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
llm = OctoAIEndpoint(
model="llama-2-13b-chat-fp16",
max_tokens=200,
presence_penalty=0,
temperature=0.1,
top_p=0.9,
)
"""
"""Key word arguments to pass to the model."""
octoai_api_base: str = Field(default=DEFAULT_BASE_URL)
octoai_api_token: SecretStr = Field(default=SecretStr(""))
model_name: str = Field(default=DEFAULT_MODEL)
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params: Dict[str, Any] = {
"model": self.model_name,
**self._default_params,
}
if not is_openai_v1():
params.update(
{
"api_key": self.octoai_api_token.get_secret_value(),
"api_base": self.octoai_api_base,
}
)
return {**params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "octoai_endpoint"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["octoai_api_base"] = get_from_dict_or_env(
values,
"octoai_api_base",
"OCTOAI_API_BASE",
default=DEFAULT_BASE_URL,
)
values["octoai_api_token"] = convert_to_secret_str(
get_from_dict_or_env(values, "octoai_api_token", "OCTOAI_API_TOKEN")
)
values["model_name"] = get_from_dict_or_env(
values,
"model_name",
"MODEL_NAME",
default=DEFAULT_MODEL,
)
try:
import openai
if is_openai_v1():
client_params = {
"api_key": values["octoai_api_token"].get_secret_value(),
"base_url": values["octoai_api_base"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).completions
else:
values["openai_api_base"] = values["octoai_api_base"]
values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
values["client"] = openai.Completion # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if "endpoint_url" in values["model_kwargs"]:
raise ValueError(
"`endpoint_url` was deprecated, please use `octoai_api_base`."
)
return values
|
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
# fmt: off
raise RuntimeError(
f"Unexpected environment variable value `{var}={val}`. "
f"Expected one of {trues + falses}")
# fmt: on
return False
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
UNSUPPORTED = []
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
remove (bool): If enabled, append future removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
message = "This function has been deprecated. "
if remove:
message += f'It will be removed from {"future" if version is None else version} release. '
wrapped.__doc__ = f"""DEPRECATED: {func.__doc__}
.. warning::
{message}
{direction}
"""
UNSUPPORTED.append(wrapped)
return wrapped
return decorator
dropping_support = deprecated(
"As TorchAudio is no longer being actively developed, this function can no longer be supported."
"See https://github.com/pytorch/audio/issues/3902 for more details.", version="2.9", remove=True)
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
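# --- Usage sketch (illustrative only; not part of the original module) ---
# A minimal, hedged example of how the decorators above compose. The function
# `fancy_resample` and the module name "some_missing_backend" are hypothetical.
if __name__ == "__main__":
    @deprecated("Use `new_resample` instead.", version="2.9", remove=True)
    def fancy_resample(x):
        """Resample the input."""
        return x

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        fancy_resample([1, 2, 3])
    print(caught[0].message)  # deprecation message including the removal notice

    @requires_module("some_missing_backend")
    def needs_backend():
        return None

    try:
        needs_backend()
    except RuntimeError as err:
        print(err)  # "...needs_backend requires module: some_missing_backend"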
|
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
# fmt: off
raise RuntimeError(
f"Unexpected environment variable value `{var}={val}`. "
f"Expected one of {trues + falses}")
# fmt: on
return False
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
remove (bool): If enabled, append future removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
message = "This function has been deprecated. "
if remove:
message += f'It will be removed from {"future" if version is None else version} release. '
wrapped.__doc__ = f"""DEPRECATED: {func.__doc__}
.. warning::
{message}
{direction}
"""
return wrapped
return decorator
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
|
"""
Getting started with categorical data
=====================================
Experimental support for categorical data.
Previously, users needed to run an encoder themselves before passing the data into
XGBoost, which creates a sparse matrix and potentially increases memory usage. This
demo showcases the experimental categorical data support; more advanced features are
planned.
.. versionadded:: 1.5.0
See Also
--------
- :doc:`Tutorial </tutorials/categorical>`
- :ref:`sphx_glr_python_examples_cat_in_the_dat.py`
- :ref:`sphx_glr_python_examples_cat_pipeline.py`
"""
from typing import Tuple
import numpy as np
import pandas as pd
import xgboost as xgb
def make_categorical(
n_samples: int, n_features: int, n_categories: int, onehot: bool
) -> Tuple[pd.DataFrame, pd.Series]:
"""Make some random data for demo."""
rng = np.random.RandomState(1994)
pd_dict = {}
for i in range(n_features + 1):
c = rng.randint(low=0, high=n_categories, size=n_samples)
pd_dict[str(i)] = pd.Series(c, dtype=np.int64)
df = pd.DataFrame(pd_dict)
label = df.iloc[:, 0]
df = df.iloc[:, 1:]
for i in range(0, n_features):
label += df.iloc[:, i]
label += 1
df = df.astype("category")
categories = np.arange(0, n_categories)
for col in df.columns:
df[col] = df[col].cat.set_categories(categories)
if onehot:
return pd.get_dummies(df), label
return df, label
def main() -> None:
# Use builtin categorical data support
    # For the scikit-learn interface, the input data should be a pandas DataFrame or a
    # cudf DataFrame with categorical features. If a numpy/cupy array is used instead,
    # the `feature_types` for `XGBRegressor` should be set accordingly.
X, y = make_categorical(100, 10, 4, False)
# Specify `enable_categorical` to True, also we use onehot-encoding-based split here
# for demonstration. For details see the document of `max_cat_to_onehot`.
reg = xgb.XGBRegressor(
tree_method="hist", enable_categorical=True, max_cat_to_onehot=5, device="cuda"
)
reg.fit(X, y, eval_set=[(X, y)])
# Pass in already encoded data
X_enc, y_enc = make_categorical(100, 10, 4, True)
reg_enc = xgb.XGBRegressor(tree_method="hist", device="cuda")
reg_enc.fit(X_enc, y_enc, eval_set=[(X_enc, y_enc)])
reg_results = np.array(reg.evals_result()["validation_0"]["rmse"])
reg_enc_results = np.array(reg_enc.evals_result()["validation_0"]["rmse"])
# Check that they have same results
np.testing.assert_allclose(reg_results, reg_enc_results)
# Convert to DMatrix for SHAP value
booster: xgb.Booster = reg.get_booster()
m = xgb.DMatrix(X, enable_categorical=True) # specify categorical data support.
SHAP = booster.predict(m, pred_contribs=True)
margin = booster.predict(m, output_margin=True)
np.testing.assert_allclose(
np.sum(SHAP, axis=len(SHAP.shape) - 1), margin, rtol=1e-3
)
if __name__ == "__main__":
main()
|
"""
Getting started with categorical data
=====================================
Experimental support for categorical data.
Previously, users needed to run an encoder themselves before passing the data into
XGBoost, which creates a sparse matrix and potentially increases memory usage. This
demo showcases the experimental categorical data support; more advanced features are
planned.
Also, see :doc:`the tutorial </tutorials/categorical>` for using XGBoost with
categorical data.
.. versionadded:: 1.5.0
"""
from typing import Tuple
import numpy as np
import pandas as pd
import xgboost as xgb
def make_categorical(
n_samples: int, n_features: int, n_categories: int, onehot: bool
) -> Tuple[pd.DataFrame, pd.Series]:
"""Make some random data for demo."""
rng = np.random.RandomState(1994)
pd_dict = {}
for i in range(n_features + 1):
c = rng.randint(low=0, high=n_categories, size=n_samples)
pd_dict[str(i)] = pd.Series(c, dtype=np.int64)
df = pd.DataFrame(pd_dict)
label = df.iloc[:, 0]
df = df.iloc[:, 1:]
for i in range(0, n_features):
label += df.iloc[:, i]
label += 1
df = df.astype("category")
categories = np.arange(0, n_categories)
for col in df.columns:
df[col] = df[col].cat.set_categories(categories)
if onehot:
return pd.get_dummies(df), label
return df, label
def main() -> None:
# Use builtin categorical data support
    # For the scikit-learn interface, the input data must be a pandas DataFrame or a
    # cudf DataFrame with categorical features
X, y = make_categorical(100, 10, 4, False)
# Specify `enable_categorical` to True, also we use onehot encoding based split
# here for demonstration. For details see the document of `max_cat_to_onehot`.
reg = xgb.XGBRegressor(
tree_method="hist", enable_categorical=True, max_cat_to_onehot=5, device="cuda"
)
reg.fit(X, y, eval_set=[(X, y)])
# Pass in already encoded data
X_enc, y_enc = make_categorical(100, 10, 4, True)
reg_enc = xgb.XGBRegressor(tree_method="hist", device="cuda")
reg_enc.fit(X_enc, y_enc, eval_set=[(X_enc, y_enc)])
reg_results = np.array(reg.evals_result()["validation_0"]["rmse"])
reg_enc_results = np.array(reg_enc.evals_result()["validation_0"]["rmse"])
# Check that they have same results
np.testing.assert_allclose(reg_results, reg_enc_results)
# Convert to DMatrix for SHAP value
booster: xgb.Booster = reg.get_booster()
m = xgb.DMatrix(X, enable_categorical=True) # specify categorical data support.
SHAP = booster.predict(m, pred_contribs=True)
margin = booster.predict(m, output_margin=True)
np.testing.assert_allclose(
np.sum(SHAP, axis=len(SHAP.shape) - 1), margin, rtol=1e-3
)
if __name__ == "__main__":
main()
|
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
img_norm_cfg = dict(
mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Initialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.src.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(convert_to_numpy, batch)
def get_jax_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
if isinstance(x, tf.SparseTensor):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
else:
# We use numpy as an intermediary because it is faster.
return convert_to_numpy(x)
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.src.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
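# --- Usage sketch (illustrative only; not part of the original adapter) ---
# A minimal, hedged example of applying the class-weight mapping directly to a
# `tf.data.Dataset`; the toy features and labels below are made up for
# illustration and assume TensorFlow is installed.
if __name__ == "__main__":
    import numpy as np
    import tensorflow as tf

    x = np.random.rand(4, 3).astype("float32")
    y = np.array([[0.0], [1.0], [2.0], [1.0]], dtype="float32")  # sparse labels
    ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
    weighted = ds.map(make_class_weight_map_fn({0: 0.2, 1: 0.6, 2: 0.3}))
    for _, _, sample_weight in weighted:
        # Per-sample weights gathered from the class-weight table.
        print(sample_weight.numpy())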
|
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Iniitialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.src.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(convert_to_numpy, batch)
def get_jax_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
if isinstance(x, tf.SparseTensor):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
else:
# We use numpy as an intermediary because it is faster.
return convert_to_numpy(x)
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.src.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
|
from ._effector import AudioEffector
from ._playback import play_audio
from ._stream_reader import StreamReader
from ._stream_writer import CodecConfig, StreamWriter
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
from ._playback import play_audio
from ._stream_reader import StreamReader
from ._stream_writer import StreamWriter
__all__ = [
"StreamReader",
"StreamWriter",
"play_audio",
]
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.librispeech.lightning import LibriSpeechRNNTModule
class MockLIBRISPEECH:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch(
"sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=4096)
), patch("asr.emformer_rnnt.librispeech.lightning.GlobalStatsNormalization", new=torch.nn.Identity), patch(
"torchaudio.datasets.LIBRISPEECH", new=MockLIBRISPEECH
), patch(
"asr.emformer_rnnt.librispeech.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield LibriSpeechRNNTModule(
librispeech_path="librispeech_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestLibriSpeechRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
from .utils import MockSentencePieceProcessor, MockCustomDataset, MockDataloader
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.librispeech.lightning import LibriSpeechRNNTModule
class MockLIBRISPEECH:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch(
"sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=4096)
), patch("asr.emformer_rnnt.librispeech.lightning.GlobalStatsNormalization", new=torch.nn.Identity), patch(
"torchaudio.datasets.LIBRISPEECH", new=MockLIBRISPEECH
), patch(
"asr.emformer_rnnt.librispeech.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield LibriSpeechRNNTModule(
librispeech_path="librispeech_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestLibriSpeechRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
import copy
import warnings
from collections.abc import Iterable, Iterator, Sized
from typing import TypeVar
from torch.utils.data.datapipes.datapipe import IterDataPipe
_T = TypeVar("_T")
__all__ = ["IterableWrapperIterDataPipe"]
class IterableWrapperIterDataPipe(IterDataPipe[_T]):
r"""
Wraps an iterable object to create an IterDataPipe.
Args:
iterable: Iterable object to be wrapped into an IterDataPipe
deepcopy: Option to deepcopy input iterable object for each
iterator. The copy is made when the first element is read in ``iter()``.
.. note::
If ``deepcopy`` is explicitly set to ``False``, users should ensure
that the data pipeline doesn't contain any in-place operations over
the iterable instance to prevent data inconsistency across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
def __init__(self, iterable: Iterable[_T], deepcopy: bool = True) -> None:
self.iterable = iterable
self.deepcopy = deepcopy
def __iter__(self) -> Iterator[_T]:
source_data = self.iterable
if self.deepcopy:
try:
source_data = copy.deepcopy(self.iterable)
# For the case that data cannot be deep-copied,
# all in-place operations will affect iterable variable.
# When this DataPipe is iterated second time, it will
# yield modified items.
except TypeError:
warnings.warn(
"The input iterable can not be deepcopied, "
"please be aware of in-place modification would affect source data."
)
yield from source_data
def __len__(self) -> int:
if isinstance(self.iterable, Sized):
return len(self.iterable)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
|
# mypy: allow-untyped-defs
import copy
import warnings
from torch.utils.data.datapipes.datapipe import IterDataPipe
__all__ = ["IterableWrapperIterDataPipe"]
class IterableWrapperIterDataPipe(IterDataPipe):
r"""
Wraps an iterable object to create an IterDataPipe.
Args:
iterable: Iterable object to be wrapped into an IterDataPipe
deepcopy: Option to deepcopy input iterable object for each
iterator. The copy is made when the first element is read in ``iter()``.
.. note::
If ``deepcopy`` is explicitly set to ``False``, users should ensure
that the data pipeline doesn't contain any in-place operations over
the iterable instance to prevent data inconsistency across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
def __init__(self, iterable, deepcopy=True):
self.iterable = iterable
self.deepcopy = deepcopy
def __iter__(self):
source_data = self.iterable
if self.deepcopy:
try:
source_data = copy.deepcopy(self.iterable)
# For the case that data cannot be deep-copied,
# all in-place operations will affect iterable variable.
# When this DataPipe is iterated second time, it will
# yield modified items.
except TypeError:
warnings.warn(
"The input iterable can not be deepcopied, "
"please be aware of in-place modification would affect source data."
)
yield from source_data
def __len__(self):
return len(self.iterable)
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SBU Captioned Photo Dataset"""
import json
import datasets
_CITATION = """\
@inproceedings{NIPS2011_5dd9db5e,
author = {Ordonez, Vicente and Kulkarni, Girish and Berg, Tamara},
booktitle = {Advances in Neural Information Processing Systems},
editor = {J. Shawe-Taylor and R. Zemel and P. Bartlett and F. Pereira and K.Q. Weinberger},
pages = {},
publisher = {Curran Associates, Inc.},
title = {Im2Text: Describing Images Using 1 Million Captioned Photographs},
url = {https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf},
volume = {24},
year = {2011}
}
"""
_DESCRIPTION = """\
The SBU Captioned Photo Dataset is a collection of over 1 million images with associated text descriptions extracted from Flickr.
"""
_LICENSE = "unknown"
_HOMEPAGE = "https://www.cs.rice.edu/~vo9/sbucaptions/"
_URL = "https://www.cs.rice.edu/~vo9/sbucaptions/sbu-captions-all.tar.gz"
_FEATURES = datasets.Features(
{"image_url": datasets.Value("string"), "user_id": datasets.Value("string"), "caption": datasets.Value("string")}
)
_MAP_SBU_FEATURES_TO_DATASETS_FEATURES = {"image_urls": "image_url", "user_ids": "user_id", "captions": "caption"}
class SBUCaptionedPhotoDatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for SBU Captioned Photo dataset."""
VERSION = datasets.Version("0.0.0")
def __init__(self, version=None, *args, **kwargs):
super().__init__(
version=version or self.VERSION,
*args,
**kwargs,
)
class SBUCaptionedPhotoDataset(datasets.GeneratorBasedBuilder):
"""SBU Captioned Photo dataset."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=_FEATURES,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
archive = dl_manager.download(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
},
)
]
def _generate_examples(self, files):
annotations = None
for path, f in files:
if path.endswith("sbu-captions-all.json"):
annotations = json.loads(f.read().decode("utf-8"))
break
# Sanity checks
assert annotations is not None
nb_samples = len(annotations[next(iter(annotations.keys()))])
assert all(len(values) == nb_samples for values in annotations.values())
keys = tuple(annotations.keys())
for idx in range(nb_samples):
yield idx, {_MAP_SBU_FEATURES_TO_DATASETS_FEATURES[key]: annotations[key][idx] for key in keys}
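# Illustrative sketch (not part of the original script) of how _generate_examples
# turns the column-oriented JSON into row-oriented examples. Given annotations like
#   {"image_urls": ["u0", "u1"], "user_ids": ["a", "b"], "captions": ["c0", "c1"]}
# the loop above yields
#   (0, {"image_url": "u0", "user_id": "a", "caption": "c0"})
#   (1, {"image_url": "u1", "user_id": "b", "caption": "c1"})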
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SBU Captioned Photo Dataset"""
import json
import datasets
_CITATION = """\
@inproceedings{NIPS2011_5dd9db5e,
author = {Ordonez, Vicente and Kulkarni, Girish and Berg, Tamara},
booktitle = {Advances in Neural Information Processing Systems},
editor = {J. Shawe-Taylor and R. Zemel and P. Bartlett and F. Pereira and K.Q. Weinberger},
pages = {},
publisher = {Curran Associates, Inc.},
title = {Im2Text: Describing Images Using 1 Million Captioned Photographs},
url = {https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf},
volume = {24},
year = {2011}
}
"""
_DESCRIPTION = """\
The SBU Captioned Photo Dataset is a collection of over 1 million images with associated text descriptions extracted from Flickr.
"""
_LICENSE = "unknown"
_HOMEPAGE = "http://www.cs.virginia.edu/~vicente/sbucaptions"
_URL = "http://www.cs.virginia.edu/~vicente/sbucaptions/sbu-captions-all.tar.gz"
_FEATURES = datasets.Features(
{"image_url": datasets.Value("string"), "user_id": datasets.Value("string"), "caption": datasets.Value("string")}
)
_MAP_SBU_FEATURES_TO_DATASETS_FEATURES = {"image_urls": "image_url", "user_ids": "user_id", "captions": "caption"}
class SBUCaptionedPhotoDatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for SBU Captioned Photo dataset."""
VERSION = datasets.Version("0.0.0")
def __init__(self, version=None, *args, **kwargs):
super().__init__(
version=version or self.VERSION,
*args,
**kwargs,
)
class SBUCaptionedPhotoDataset(datasets.GeneratorBasedBuilder):
"""SBU Captioned Photo dataset."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=_FEATURES,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
archive = dl_manager.download(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
},
)
]
def _generate_examples(self, files):
annotations = None
for path, f in files:
if path.endswith("sbu-captions-all.json"):
annotations = json.loads(f.read().decode("utf-8"))
break
# Sanity checks
assert annotations is not None
nb_samples = len(annotations[next(iter(annotations.keys()))])
assert all(len(values) == nb_samples for values in annotations.values())
keys = tuple(annotations.keys())
for idx in range(nb_samples):
yield idx, {_MAP_SBU_FEATURES_TO_DATASETS_FEATURES[key]: annotations[key][idx] for key in keys}
|
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
__all__ = [
"DenoisingAutoEncoderDataset",
"NoDuplicatesDataLoader",
"ParallelSentencesDataset",
"SentencesDataset",
"SentenceLabelDataset",
]
|
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentencesDataset import SentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
SanitizeBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.datasets import CocoDataset
from mmdet.visualization import get_palette, jitter_color, palette_val
def test_palette():
assert palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)
# test list
palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
palette_ = get_palette(palette, 3)
for color, color_ in zip(palette, palette_):
assert color == color_
# test tuple
palette = get_palette((1, 2, 3), 3)
assert len(palette) == 3
for color in palette:
assert color == (1, 2, 3)
# test color str
palette = get_palette('red', 3)
assert len(palette) == 3
for color in palette:
assert color == (255, 0, 0)
# test dataset str
palette = get_palette('coco', len(CocoDataset.METAINFO['CLASSES']))
assert len(palette) == len(CocoDataset.METAINFO['CLASSES'])
assert palette[0] == (220, 20, 60)
# TODO: Awaiting refactoring
# palette = get_palette('coco', len(CocoPanopticDataset.METAINFO['CLASSES'])) # noqa
# assert len(palette) == len(CocoPanopticDataset.METAINFO['CLASSES'])
# assert palette[-1] == (250, 141, 255)
# palette = get_palette('voc', len(VOCDataset.METAINFO['CLASSES']))
# assert len(palette) == len(VOCDataset.METAINFO['CLASSES'])
# assert palette[0] == (106, 0, 228)
# palette = get_palette('citys', len(CityscapesDataset.METAINFO['CLASSES'])) # noqa
# assert len(palette) == len(CityscapesDataset.METAINFO['CLASSES'])
# assert palette[0] == (220, 20, 60)
# test random
palette1 = get_palette('random', 3)
palette2 = get_palette(None, 3)
for color1, color2 in zip(palette1, palette2):
assert isinstance(color1, tuple)
assert isinstance(color2, tuple)
assert color1 == color2
def test_jitter_color():
color = tuple(np.random.randint(0, 255, 3, np.uint8))
jittered_color = jitter_color(color)
for c in jittered_color:
assert 0 <= c <= 255
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.datasets import CocoDataset
from mmdet.visualization import get_palette, palette_val
def test_palette():
assert palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)
# test list
palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
palette_ = get_palette(palette, 3)
for color, color_ in zip(palette, palette_):
assert color == color_
# test tuple
palette = get_palette((1, 2, 3), 3)
assert len(palette) == 3
for color in palette:
assert color == (1, 2, 3)
# test color str
palette = get_palette('red', 3)
assert len(palette) == 3
for color in palette:
assert color == (255, 0, 0)
# test dataset str
palette = get_palette('coco', len(CocoDataset.METAINFO['CLASSES']))
assert len(palette) == len(CocoDataset.METAINFO['CLASSES'])
assert palette[0] == (220, 20, 60)
# TODO: Awaiting refactoring
# palette = get_palette('coco', len(CocoPanopticDataset.METAINFO['CLASSES'])) # noqa
# assert len(palette) == len(CocoPanopticDataset.METAINFO['CLASSES'])
# assert palette[-1] == (250, 141, 255)
# palette = get_palette('voc', len(VOCDataset.METAINFO['CLASSES']))
# assert len(palette) == len(VOCDataset.METAINFO['CLASSES'])
# assert palette[0] == (106, 0, 228)
# palette = get_palette('citys', len(CityscapesDataset.METAINFO['CLASSES'])) # noqa
# assert len(palette) == len(CityscapesDataset.METAINFO['CLASSES'])
# assert palette[0] == (220, 20, 60)
# test random
palette1 = get_palette('random', 3)
palette2 = get_palette(None, 3)
for color1, color2 in zip(palette1, palette2):
assert isinstance(color1, tuple)
assert isinstance(color2, tuple)
assert color1 == color2
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_val.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v1_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_train.json',
img_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
|
"""News article reader using Newspaper."""
import logging
from importlib.util import find_spec
from typing import Any, Generator, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class NewsArticleReader(BaseReader):
"""
Simple news article reader.
Reads news articles from the web and parses them using the `newspaper` library.
Args:
text_mode (bool): Whether to load a text version or HTML version of the content (default=True).
use_nlp (bool): Whether to use NLP to extract additional summary and keywords (default=True).
newspaper_kwargs: Additional keyword arguments to pass to newspaper.Article. See
https://newspaper.readthedocs.io/en/latest/user_guide/quickstart.html#article
"""
def __init__(
self, text_mode: bool = True, use_nlp: bool = True, **newspaper_kwargs: Any
) -> None:
"""Initialize with parameters."""
if find_spec("newspaper") is None:
raise ImportError(
"`newspaper` package not found, please run `pip install newspaper3k`"
)
self.load_text = text_mode
self.use_nlp = use_nlp
self.newspaper_kwargs = newspaper_kwargs
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the list of news article urls.
Args:
urls (List[str]): List of URLs to load news articles.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list) and not isinstance(urls, Generator):
raise ValueError("urls must be a list or generator.")
documents = []
for url in urls:
from newspaper import Article
try:
article = Article(url, **self.newspaper_kwargs)
article.download()
article.parse()
if self.use_nlp:
article.nlp()
except Exception as e:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
metadata = {
"title": getattr(article, "title", ""),
"link": getattr(article, "url", getattr(article, "canonical_link", "")),
"authors": getattr(article, "authors", []),
"language": getattr(article, "meta_lang", ""),
"description": getattr(article, "meta_description", ""),
"publish_date": getattr(article, "publish_date", ""),
}
if self.load_text:
content = article.text
else:
content = article.html
if self.use_nlp:
metadata["keywords"] = getattr(article, "keywords", [])
metadata["summary"] = getattr(article, "summary", "")
documents.append(Document(text=content, metadata=metadata))
return documents
if __name__ == "__main__":
reader = NewsArticleReader()
article = reader.load_data(["https://www.bbc.com/news/world-us-canada-56797998"])
print(article)
|
"""News article reader using Newspaper."""
import logging
from importlib.util import find_spec
from typing import Any, Generator, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class NewsArticleReader(BaseReader):
"""Simple news article reader.
Reads news articles from the web and parses them using the `newspaper` library.
Args:
text_mode (bool): Whether to load a text version or HTML version of the content (default=True).
use_nlp (bool): Whether to use NLP to extract additional summary and keywords (default=True).
newspaper_kwargs: Additional keyword arguments to pass to newspaper.Article. See
https://newspaper.readthedocs.io/en/latest/user_guide/quickstart.html#article
"""
def __init__(
self, text_mode: bool = True, use_nlp: bool = True, **newspaper_kwargs: Any
) -> None:
"""Initialize with parameters."""
if find_spec("newspaper") is None:
raise ImportError(
"`newspaper` package not found, please run `pip install newspaper3k`"
)
self.load_text = text_mode
self.use_nlp = use_nlp
self.newspaper_kwargs = newspaper_kwargs
def load_data(self, urls: List[str]) -> List[Document]:
"""Load data from the list of news article urls.
Args:
urls (List[str]): List of URLs to load news articles.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list) and not isinstance(urls, Generator):
raise ValueError("urls must be a list or generator.")
documents = []
for url in urls:
from newspaper import Article
try:
article = Article(url, **self.newspaper_kwargs)
article.download()
article.parse()
if self.use_nlp:
article.nlp()
except Exception as e:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
metadata = {
"title": getattr(article, "title", ""),
"link": getattr(article, "url", getattr(article, "canonical_link", "")),
"authors": getattr(article, "authors", []),
"language": getattr(article, "meta_lang", ""),
"description": getattr(article, "meta_description", ""),
"publish_date": getattr(article, "publish_date", ""),
}
if self.load_text:
content = article.text
else:
content = article.html
if self.use_nlp:
metadata["keywords"] = getattr(article, "keywords", [])
metadata["summary"] = getattr(article, "summary", "")
documents.append(Document(text=content, metadata=metadata))
return documents
if __name__ == "__main__":
reader = NewsArticleReader()
article = reader.load_data(["https://www.bbc.com/news/world-us-canada-56797998"])
print(article)
|
"""Base callback handler that can be used to handle callbacks in langchain."""
from __future__ import annotations
from langchain_core.callbacks import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
__all__ = [
"AsyncCallbackHandler",
"BaseCallbackHandler",
"BaseCallbackManager",
"CallbackManagerMixin",
"Callbacks",
"ChainManagerMixin",
"LLMManagerMixin",
"RetrieverManagerMixin",
"RunManagerMixin",
"ToolManagerMixin",
]
|
"""Base callback handler that can be used to handle callbacks in langchain."""
from __future__ import annotations
from langchain_core.callbacks import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
__all__ = [
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"Callbacks",
]
|
import argparse
import pytest
from jina.parsers.hubble.new import mixin_hub_new_parser
def test_new_parser():
parser = argparse.ArgumentParser(
epilog='Test', description='Test Hub Command Line Interface'
)
mixin_hub_new_parser(parser)
args = parser.parse_args([])
assert not args.dockerfile
assert not args.advance_configuration
assert args.name is None
assert args.path is None
assert args.description is None
assert args.keywords is None
assert args.url is None
args = parser.parse_args(['--dockerfile', 'cpu'])
assert args.dockerfile
args = parser.parse_args(['--advance-configuration'])
assert args.advance_configuration
args = parser.parse_args(
[
'--name',
'Dummy Executor',
'--path',
'Dummy Path',
'--description',
'Dummy description',
'--keywords',
'Dummy keywords',
'--url',
'Dummy url',
]
)
assert not args.dockerfile
assert not args.advance_configuration
assert args.name == 'Dummy Executor'
assert args.path == 'Dummy Path'
assert args.description == 'Dummy description'
assert args.keywords == 'Dummy keywords'
assert args.url == 'Dummy url'
args = parser.parse_args(
[
'--name',
'Dummy Executor',
'--path',
'Dummy Path',
'--description',
'Dummy description',
'--keywords',
'Dummy keywords',
'--url',
'Dummy url',
'--advance-configuration',
]
)
assert not args.dockerfile
assert args.advance_configuration
assert args.name == 'Dummy Executor'
assert args.path == 'Dummy Path'
assert args.description == 'Dummy description'
assert args.keywords == 'Dummy keywords'
assert args.url == 'Dummy url'
args = parser.parse_args(
[
'--dockerfile',
'cpu',
'--name',
'Dummy Executor',
'--path',
'Dummy Path',
'--description',
'Dummy description',
'--keywords',
'Dummy keywords',
'--url',
'Dummy url',
]
)
assert args.dockerfile
assert not args.advance_configuration
assert args.name == 'Dummy Executor'
assert args.path == 'Dummy Path'
assert args.description == 'Dummy description'
assert args.keywords == 'Dummy keywords'
assert args.url == 'Dummy url'
|
import argparse
import pytest
from jina.parsers.hubble.new import mixin_hub_new_parser
def test_new_parser():
parser = argparse.ArgumentParser(
epilog='Test', description='Test Hub Command Line Interface'
)
mixin_hub_new_parser(parser)
args = parser.parse_args([])
assert not args.add_dockerfile
assert not args.advance_configuration
assert args.name is None
assert args.path is None
assert args.description is None
assert args.keywords is None
assert args.url is None
args = parser.parse_args(['--add-dockerfile'])
assert args.add_dockerfile
args = parser.parse_args(['--advance-configuration'])
assert args.advance_configuration
args = parser.parse_args(
[
'--name',
'Dummy Executor',
'--path',
'Dummy Path',
'--description',
'Dummy description',
'--keywords',
'Dummy keywords',
'--url',
'Dummy url',
]
)
assert not args.add_dockerfile
assert not args.advance_configuration
assert args.name == 'Dummy Executor'
assert args.path == 'Dummy Path'
assert args.description == 'Dummy description'
assert args.keywords == 'Dummy keywords'
assert args.url == 'Dummy url'
args = parser.parse_args(
[
'--name',
'Dummy Executor',
'--path',
'Dummy Path',
'--description',
'Dummy description',
'--keywords',
'Dummy keywords',
'--url',
'Dummy url',
'--advance-configuration',
]
)
assert not args.add_dockerfile
assert args.advance_configuration
assert args.name == 'Dummy Executor'
assert args.path == 'Dummy Path'
assert args.description == 'Dummy description'
assert args.keywords == 'Dummy keywords'
assert args.url == 'Dummy url'
args = parser.parse_args(
[
'--add-dockerfile',
'--name',
'Dummy Executor',
'--path',
'Dummy Path',
'--description',
'Dummy description',
'--keywords',
'Dummy keywords',
'--url',
'Dummy url',
]
)
assert args.add_dockerfile
assert not args.advance_configuration
assert args.name == 'Dummy Executor'
assert args.path == 'Dummy Path'
assert args.description == 'Dummy description'
assert args.keywords == 'Dummy keywords'
assert args.url == 'Dummy url'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric'
]
|
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union, Any
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from llama_index.core.llms import ChatMessage, LLM
from llama_index.core.memory import BaseMemory
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType, PromptDictType
from llama_index.core.tools import BaseTool, AsyncBaseTool, FunctionTool
from llama_index.core.workflow import Context
from llama_index.core.objects import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.workflow.checkpointer import CheckpointCallback
from llama_index.core.workflow.handler import WorkflowHandler
def get_default_llm() -> LLM:
return Settings.llm
class BaseWorkflowAgent(BaseModel, PromptMixin, ABC):
"""Base class for all agents, combining config and logic."""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = Field(description="The name of the agent")
description: str = Field(
description="The description of what the agent does and is responsible for"
)
system_prompt: Optional[str] = Field(
default=None, description="The system prompt for the agent"
)
tools: Optional[List[BaseTool]] = Field(
default=None, description="The tools that the agent can use"
)
tool_retriever: Optional[ObjectRetriever] = Field(
default=None,
description="The tool retriever for the agent, can be provided instead of tools",
)
can_handoff_to: Optional[List[str]] = Field(
default=None, description="The agent names that this agent can hand off to"
)
llm: LLM = Field(
default_factory=get_default_llm, description="The LLM that the agent uses"
)
@field_validator("tools", mode="before")
def validate_tools(
cls, v: Optional[Sequence[Union[BaseTool, Callable]]]
) -> Optional[Sequence[BaseTool]]:
"""Validate tools.
If tools are not of type BaseTool, they will be converted to FunctionTools.
This assumes the inputs are tools or callable functions.
"""
if v is None:
return None
validated_tools: List[BaseTool] = []
for tool in v:
if not isinstance(tool, BaseTool):
validated_tools.append(FunctionTool.from_defaults(tool))
else:
validated_tools.append(tool)
for tool in validated_tools:
if tool.metadata.name == "handoff":
raise ValueError(
"'handoff' is a reserved tool name. Please use a different name."
)
return validated_tools # type: ignore[return-value]
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the agent."""
@abstractmethod
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results."""
@abstractmethod
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the agent's execution."""
@abstractmethod
def run(
self,
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
ctx: Optional[Context] = None,
stepwise: bool = False,
checkpoint_callback: Optional[CheckpointCallback] = None,
**workflow_kwargs: Any,
) -> WorkflowHandler:
"""Run the agent."""
|
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from llama_index.core.llms import ChatMessage, LLM
from llama_index.core.memory import BaseMemory
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType, PromptDictType
from llama_index.core.tools import BaseTool, AsyncBaseTool, FunctionTool
from llama_index.core.workflow import Context
from llama_index.core.objects import ObjectRetriever
from llama_index.core.settings import Settings
def get_default_llm() -> LLM:
return Settings.llm
class BaseWorkflowAgent(BaseModel, PromptMixin, ABC):
"""Base class for all agents, combining config and logic."""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = Field(description="The name of the agent")
description: str = Field(
description="The description of what the agent does and is responsible for"
)
system_prompt: Optional[str] = Field(
default=None, description="The system prompt for the agent"
)
tools: Optional[List[BaseTool]] = Field(
default=None, description="The tools that the agent can use"
)
tool_retriever: Optional[ObjectRetriever] = Field(
default=None,
description="The tool retriever for the agent, can be provided instead of tools",
)
can_handoff_to: Optional[List[str]] = Field(
default=None, description="The agent names that this agent can hand off to"
)
llm: LLM = Field(
default_factory=get_default_llm, description="The LLM that the agent uses"
)
@field_validator("tools", mode="before")
def validate_tools(
cls, v: Optional[Sequence[Union[BaseTool, Callable]]]
) -> Optional[Sequence[BaseTool]]:
"""Validate tools.
If tools are not of type BaseTool, they will be converted to FunctionTools.
This assumes the inputs are tools or callable functions.
"""
if v is None:
return None
validated_tools: List[BaseTool] = []
for tool in v:
if not isinstance(tool, BaseTool):
validated_tools.append(FunctionTool.from_defaults(tool))
else:
validated_tools.append(tool)
for tool in validated_tools:
if tool.metadata.name == "handoff":
raise ValueError(
"'handoff' is a reserved tool name. Please use a different name."
)
return validated_tools # type: ignore[return-value]
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the agent."""
@abstractmethod
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results."""
@abstractmethod
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the agent's execution."""
|
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='DetVisualizationHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_level = 'INFO'
load_from = None
resume = False
# TODO: support auto scaling lr
|
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
)
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
# custom_hooks = [dict(type='DetVisualizationHook', interval=10)]
log_level = 'INFO'
load_from = None
resume = False
# TODO: support auto scaling lr
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDynamicRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
@parameterized.expand(['cpu', 'cuda'])
def test_dynamic_roi_head_loss(self, device):
"""Tests trident roi head predict."""
if not torch.cuda.is_available() and device == 'cuda':
# RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.to(device=device)
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device=device))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True,
device=device)['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device=device)['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDynamicRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
@parameterized.expand(['cpu', 'cuda'])
def test_dynamic_roi_head_loss(self, device):
"""Tests trident roi head predict."""
if not torch.cuda.is_available() and device == 'cuda':
# RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.to(device=device)
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device=device))
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pickle
from inspect import signature
import pytest
from sklearn.utils.deprecation import _is_deprecated, deprecated
@deprecated("qwerty")
class MockClass1:
pass
class MockClass2:
@deprecated("mockclass2_method")
def method(self):
pass
@deprecated("n_features_ is deprecated") # type: ignore
@property
def n_features_(self):
"""Number of input features."""
return 10
class MockClass3:
@deprecated()
def __init__(self):
pass
class MockClass4:
pass
class MockClass5(MockClass1):
"""Inherit from deprecated class but does not call super().__init__."""
def __init__(self, a):
self.a = a
@deprecated("a message")
class MockClass6:
"""A deprecated class that overrides __new__."""
def __new__(cls, *args, **kwargs):
assert len(args) > 0
return super().__new__(cls)
@deprecated()
def mock_function():
return 10
def test_deprecated():
with pytest.warns(FutureWarning, match="qwerty"):
MockClass1()
with pytest.warns(FutureWarning, match="mockclass2_method"):
MockClass2().method()
with pytest.warns(FutureWarning, match="deprecated"):
MockClass3()
with pytest.warns(FutureWarning, match="qwerty"):
MockClass5(42)
with pytest.warns(FutureWarning, match="a message"):
MockClass6(42)
with pytest.warns(FutureWarning, match="deprecated"):
val = mock_function()
assert val == 10
def test_is_deprecated():
# Test if _is_deprecated helper identifies wrapping via deprecated
# NOTE it works only for class methods and functions
assert _is_deprecated(MockClass1.__new__)
assert _is_deprecated(MockClass2().method)
assert _is_deprecated(MockClass3.__init__)
assert not _is_deprecated(MockClass4.__init__)
assert _is_deprecated(MockClass5.__new__)
assert _is_deprecated(mock_function)
def test_pickle():
pickle.loads(pickle.dumps(mock_function))
def test_deprecated_class_signature():
@deprecated()
class MockClass:
def __init__(self, a, b=1, c=2):
pass
assert list(signature(MockClass).parameters.keys()) == ["a", "b", "c"]
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pickle
import pytest
from sklearn.utils.deprecation import _is_deprecated, deprecated
@deprecated("qwerty")
class MockClass1:
pass
class MockClass2:
@deprecated("mockclass2_method")
def method(self):
pass
@deprecated("n_features_ is deprecated") # type: ignore
@property
def n_features_(self):
"""Number of input features."""
return 10
class MockClass3:
@deprecated()
def __init__(self):
pass
class MockClass4:
pass
class MockClass5(MockClass1):
"""Inherit from deprecated class but does not call super().__init__."""
def __init__(self, a):
self.a = a
@deprecated("a message")
class MockClass6:
"""A deprecated class that overrides __new__."""
def __new__(cls, *args, **kwargs):
assert len(args) > 0
return super().__new__(cls)
@deprecated()
def mock_function():
return 10
def test_deprecated():
with pytest.warns(FutureWarning, match="qwerty"):
MockClass1()
with pytest.warns(FutureWarning, match="mockclass2_method"):
MockClass2().method()
with pytest.warns(FutureWarning, match="deprecated"):
MockClass3()
with pytest.warns(FutureWarning, match="qwerty"):
MockClass5(42)
with pytest.warns(FutureWarning, match="a message"):
MockClass6(42)
with pytest.warns(FutureWarning, match="deprecated"):
val = mock_function()
assert val == 10
def test_is_deprecated():
# Test if _is_deprecated helper identifies wrapping via deprecated
# NOTE it works only for class methods and functions
assert _is_deprecated(MockClass1.__new__)
assert _is_deprecated(MockClass2().method)
assert _is_deprecated(MockClass3.__init__)
assert not _is_deprecated(MockClass4.__init__)
assert _is_deprecated(MockClass5.__new__)
assert _is_deprecated(mock_function)
def test_pickle():
pickle.loads(pickle.dumps(mock_function))
|
from langchain_core._api.path import as_import_path, get_relative_path
__all__ = ["as_import_path", "get_relative_path"]
|
from langchain_core._api.path import as_import_path, get_relative_path
__all__ = ["get_relative_path", "as_import_path"]
|
import numpy as np
import torch
import torchaudio.prototype.transforms as T
from scipy import signal
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
def _get_ratio(mat):
return (mat.sum() / mat.numel()).item()
class TransformsTestImpl(TestBaseMixin):
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
["full", "valid", "same"],
)
def test_Convolve(self, leading_dims, lengths, mode):
"""Check that convolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
convolve = T.Convolve(mode=mode).to(self.device)
actual = convolve(x, y)
num_signals = torch.tensor(leading_dims).prod() if leading_dims else 1
x_reshaped = x.reshape((num_signals, L_x))
y_reshaped = y.reshape((num_signals, L_y))
expected = [
signal.convolve(x_reshaped[i].detach().cpu().numpy(), y_reshaped[i].detach().cpu().numpy(), mode=mode)
for i in range(num_signals)
]
expected = torch.tensor(np.array(expected))
expected = expected.reshape(leading_dims + (-1,))
self.assertEqual(expected, actual)
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
["full", "valid", "same"],
)
def test_FFTConvolve(self, leading_dims, lengths, mode):
"""Check that fftconvolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
convolve = T.FFTConvolve(mode=mode).to(self.device)
actual = convolve(x, y)
expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1, mode=mode)
expected = torch.tensor(expected)
self.assertEqual(expected, actual)
def test_InverseBarkScale(self):
"""Gauge the quality of InverseBarkScale transform.
As InverseBarkScale is currently implemented with
random initialization + iterative optimization,
it is not practically possible to assert the difference between
the estimated spectrogram and the original spectrogram as a whole.
The estimated spectrogram can have very large local discrepancies.
Thus, in this test we gauge what percentage of elements are below
a certain tolerance.
At the moment, the quality of the estimated spectrogram is worse than
that obtained for InverseMelScale.
If the implementation changes in a way that makes the quality even worse,
this test will fail.
"""
n_fft = 400
power = 1
n_barks = 64
sample_rate = 8000
n_stft = n_fft // 2 + 1
# Generate reference spectrogram and input mel-scaled spectrogram
expected = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=1, n_channels=2), n_fft=n_fft, power=power
).to(self.device, self.dtype)
input = T.BarkScale(n_barks=n_barks, sample_rate=sample_rate, n_stft=n_stft).to(self.device, self.dtype)(
expected
)
# Run transform
transform = T.InverseBarkScale(n_stft, n_barks=n_barks, sample_rate=sample_rate).to(self.device, self.dtype)
result = transform(input)
# Compare
epsilon = 1e-60
relative_diff = torch.abs((result - expected) / (expected + epsilon))
for tol in [1e-1, 1e-3, 1e-5, 1e-10]:
print(f"Ratio of relative diff smaller than {tol:e} is " f"{_get_ratio(relative_diff < tol)}")
assert _get_ratio(relative_diff < 1e-1) > 0.2
assert _get_ratio(relative_diff < 1e-3) > 2e-3
|
import numpy as np
import torch
import torchaudio.prototype.transforms as T
from scipy import signal
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class TransformsTestImpl(TestBaseMixin):
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
["full", "valid", "same"],
)
def test_Convolve(self, leading_dims, lengths, mode):
"""Check that convolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
convolve = T.Convolve(mode=mode).to(self.device)
actual = convolve(x, y)
num_signals = torch.tensor(leading_dims).prod() if leading_dims else 1
x_reshaped = x.reshape((num_signals, L_x))
y_reshaped = y.reshape((num_signals, L_y))
expected = [
signal.convolve(x_reshaped[i].detach().cpu().numpy(), y_reshaped[i].detach().cpu().numpy(), mode=mode)
for i in range(num_signals)
]
expected = torch.tensor(np.array(expected))
expected = expected.reshape(leading_dims + (-1,))
self.assertEqual(expected, actual)
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
["full", "valid", "same"],
)
def test_FFTConvolve(self, leading_dims, lengths, mode):
"""Check that fftconvolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
convolve = T.FFTConvolve(mode=mode).to(self.device)
actual = convolve(x, y)
expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1, mode=mode)
expected = torch.tensor(expected)
self.assertEqual(expected, actual)
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
"""Callback handler that returns an async iterator.
Only the final output of the agent will be iterated.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Whether to ignore white space and newlines when comparing
answer_prefix_tokens to the last tokens (to determine whether the answer
has been reached).
stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
async def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
self.answer_reached = False
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if self.answer_reached:
self.done.set()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
self.queue.put_nowait(t)
return
# If yes, then put tokens from now on
if self.answer_reached:
self.queue.put_nowait(token)
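# Hedged usage sketch (not part of the original module). Assuming the parent
# AsyncIteratorCallbackHandler exposes ``aiter()`` as in langchain, the handler is
# attached to a streaming LLM/agent run and only tokens that appear after the
# "Final Answer:" prefix are yielded to the consumer:
#
#   handler = AsyncFinalIteratorCallbackHandler(strip_tokens=True, stream_prefix=False)
#   # start the agent with callbacks=[handler] as a background task, then:
#   # async for token in handler.aiter():
#   #     print(token, end="")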
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
"""Callback handler that returns an async iterator.
Only the final output of the agent will be iterated.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Whether to ignore white space and newlines when comparing
answer_prefix_tokens to the last tokens (to determine whether the answer
has been reached).
stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
async def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
self.answer_reached = False
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if self.answer_reached:
self.done.set()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
self.queue.put_nowait(t)
return
# If yes, then put tokens from now on
if self.answer_reached:
self.queue.put_nowait(token)
|
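The handler above is only meaningful when wired into an agent run. Below is a minimal usage sketch; it assumes the class is importable from `langchain.callbacks.streaming_aiter_final_only` and that the agent object exposes an async `arun` method accepting `callbacks` (both assumptions, not shown in the snippet itself).

```python
import asyncio

from langchain.callbacks.streaming_aiter_final_only import (  # assumed module path
    AsyncFinalIteratorCallbackHandler,
)


async def stream_final_answer(agent, question: str) -> str:
    # Only tokens arriving after the "Final Answer:" prefix are pushed to the queue.
    handler = AsyncFinalIteratorCallbackHandler(strip_tokens=True, stream_prefix=False)
    # Run the agent concurrently so tokens can be consumed while it is still working.
    run = asyncio.create_task(agent.arun(question, callbacks=[handler]))
    chunks = []
    async for token in handler.aiter():
        chunks.append(token)
    await run
    return "".join(chunks)
```

`aiter()` is inherited from `AsyncIteratorCallbackHandler` and stops once `on_llm_end` sets the `done` event.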
"""Default prompt for ReAct agent."""
from pathlib import Path
# TODO: have formatting instructions be a part of react output parser
with (
Path(__file__).parents[0] / Path("templates") / Path("system_header_template.md")
).open("r") as f:
__BASE_REACT_CHAT_SYSTEM_HEADER = f.read()
REACT_CHAT_SYSTEM_HEADER = __BASE_REACT_CHAT_SYSTEM_HEADER.replace(
"{context_prompt}", "", 1
)
CONTEXT_REACT_CHAT_SYSTEM_HEADER = __BASE_REACT_CHAT_SYSTEM_HEADER.replace(
"{context_prompt}",
"""
Here is some context to help you answer the question and plan:
{context}
""",
1,
)
|
"""Default prompt for ReAct agent."""
from pathlib import Path
# TODO: have formatting instructions be a part of react output parser
with (
Path(__file__).parents[0] / Path("templates") / Path("system_header_template.md")
).open("r") as f:
__BASE_REACT_CHAT_SYSTEM_HEADER = f.read()
REACT_CHAT_SYSTEM_HEADER = __BASE_REACT_CHAT_SYSTEM_HEADER.replace(
"{context_prompt}", "", 1
)
CONTEXT_REACT_CHAT_SYSTEM_HEADER = __BASE_REACT_CHAT_SYSTEM_HEADER.replace(
"{context_prompt}",
"""
Here is some context to help you answer the question and plan:
{context}
""",
1,
)
|
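Since the Markdown template file itself is not part of this snippet, the stand-in string below illustrates how the two `str.replace` calls splice (or drop) the `{context_prompt}` placeholder; the template content is assumed, not taken from the real file.

```python
# Stand-in for templates/system_header_template.md (content is illustrative only).
_TEMPLATE = (
    "You are designed to help with a variety of tasks.\n"
    "{context_prompt}\n"
    "## Tools\n"
    "You have access to a variety of tools."
)

# Plain header: the placeholder is simply removed.
react_header = _TEMPLATE.replace("{context_prompt}", "", 1)

# Context-aware header: the placeholder becomes a block that still carries a
# `{context}` slot, filled later at prompt-format time.
context_header = _TEMPLATE.replace(
    "{context_prompt}",
    "\nHere is some context to help you answer the question and plan:\n{context}\n",
    1,
)
print(context_header.format(context="The user is asking about Q3 revenue."))
```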
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[[
dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales
], [
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
], [dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
]])
]
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[[
dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales
], [
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
], [dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
]])
]
|
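For orientation, `TestTimeAug` expands the nested transform groups as a Cartesian product, so the config above produces 3 scales x 2 flips = 6 views per image, with the single-entry groups (annotation loading and packing) applied to every view. A rough sketch of that expansion:

```python
from itertools import product

img_scales = [(1333, 800), (666, 400), (2000, 1200)]
resizes = [dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales]
flips = [dict(type='RandomFlip', prob=1.), dict(type='RandomFlip', prob=0.)]

# Each test image is processed once per (resize, flip) combination.
views = list(product(resizes, flips))
print(len(views))  # 6
```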
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import DoctranTextTranslator
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DoctranTextTranslator": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DoctranTextTranslator",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import DoctranTextTranslator
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DoctranTextTranslator": "langchain_community.document_transformers"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DoctranTextTranslator",
]
|
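The shim relies on PEP 562's module-level `__getattr__`. A stripped-down sketch of the same lazy-redirect pattern, written without langchain's `create_importer` helper, looks like this:

```python
import importlib
from typing import Any

_DEPRECATED_LOOKUP = {
    "DoctranTextTranslator": "langchain_community.document_transformers",
}


def __getattr__(name: str) -> Any:
    """Resolve deprecated names by importing them from their new location."""
    if name in _DEPRECATED_LOOKUP:
        module = importlib.import_module(_DEPRECATED_LOOKUP[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```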
"""Retriever tool."""
from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING, Literal, Optional, Union
from pydantic import BaseModel, Field
from langchain_core.prompts import (
BasePromptTemplate,
PromptTemplate,
aformat_document,
format_document,
)
from langchain_core.tools.simple import Tool
if TYPE_CHECKING:
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = retriever.invoke(query, config={"callbacks": callbacks})
content = document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
if response_format == "content_and_artifact":
return (content, docs)
return content
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = await retriever.ainvoke(query, config={"callbacks": callbacks})
content = document_separator.join(
[await aformat_document(doc, document_prompt) for doc in docs]
)
if response_format == "content_and_artifact":
return (content, docs)
return content
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Tool:
r"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
document_prompt: The prompt to use for the document. Defaults to None.
document_separator: The separator to use between documents. Defaults to "\n\n".
response_format: The tool response format. If "content" then the output of
the tool is interpreted as the contents of a ToolMessage. If
"content_and_artifact" then the output is expected to be a two-tuple
corresponding to the (content, artifact) of a ToolMessage (artifact
being a list of documents in this case). Defaults to "content".
Returns:
Tool class to pass to an agent.
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
response_format=response_format,
)
|
from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING, Literal, Optional, Union
from pydantic import BaseModel, Field
from langchain_core.prompts import (
BasePromptTemplate,
PromptTemplate,
aformat_document,
format_document,
)
from langchain_core.tools.simple import Tool
if TYPE_CHECKING:
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = retriever.invoke(query, config={"callbacks": callbacks})
content = document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
if response_format == "content_and_artifact":
return (content, docs)
return content
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = await retriever.ainvoke(query, config={"callbacks": callbacks})
content = document_separator.join(
[await aformat_document(doc, document_prompt) for doc in docs]
)
if response_format == "content_and_artifact":
return (content, docs)
return content
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Tool:
"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
document_prompt: The prompt to use for the document. Defaults to None.
document_separator: The separator to use between documents. Defaults to "\n\n".
response_format: The tool response format. If "content" then the output of
the tool is interpreted as the contents of a ToolMessage. If
"content_and_artifact" then the output is expected to be a two-tuple
corresponding to the (content, artifact) of a ToolMessage (artifact
being a list of documents in this case). Defaults to "content".
Returns:
Tool class to pass to an agent.
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
response_format=response_format,
)
|
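A minimal usage sketch of the factory above; the retriever is whatever `BaseRetriever` you already have (for example `vector_store.as_retriever()`), and the tool name and description are placeholders:

```python
from langchain_core.tools.retriever import create_retriever_tool  # assumed import path


def build_docs_tool(retriever):
    """Wrap an existing retriever as an agent tool."""
    return create_retriever_tool(
        retriever,
        name="search_project_docs",  # placeholder tool name
        description="Search the project documentation and return relevant passages.",
        response_format="content_and_artifact",  # also surface the raw Documents
    )
```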
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
    It is quite similar to the FCOS head, except for the searched structure of
classification branch and bbox regression branch, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
strides (list[int] | list[tuple[int, int]]): Strides of points
in multiple feature levels. Default: (4, 8, 16, 32, 64).
regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling. Default: False.
center_sample_radius (float): Radius of center sampling. Default: 1.5.
norm_on_bbox (bool): If true, normalize the regression targets
with FPN strides. Default: False.
centerness_on_reg (bool): If true, position centerness on the
regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
Default: False.
conv_bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise
False. Default: "auto".
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
loss_centerness (dict): Config of centerness loss.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
init_cfg (dict or list[dict], optional): Initialization config dict.
""" # noqa: E501
def __init__(self, *args, init_cfg=None, **kwargs):
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
    It is quite similar to the FCOS head, except for the searched structure of
classification branch and bbox regression branch, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
"""
def __init__(self, *args, init_cfg=None, **kwargs):
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
|
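To make the searched branch concrete, the loop in `_init_layers` wires the channels as sketched below, assuming the common `in_channels=256`, `feat_channels=256` setting (these values are not fixed by the snippet itself):

```python
# dconv3x3 -> conv3x3 -> dconv3x3 -> conv1x1, built identically for the
# classification and regression branches.
arch = ['DCNv2 3x3', 'Conv 3x3', 'DCNv2 3x3', 'Conv 1x1']
in_channels, feat_channels = 256, 256  # assumed config values

for i, op in enumerate(arch):
    chn = in_channels if i == 0 else feat_channels
    print(f'layer {i}: {op:<9} {chn} -> {feat_channels}')
```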
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.everlyai.utils import everlyai_modelname_to_contextsize
from llama_index.llms.openai import OpenAI
EVERLYAI_API_BASE = "https://everlyai.xyz/hosted"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class EverlyAI(OpenAI):
"""
EverlyAI LLM.
Examples:
`pip install llama-index-llms-everlyai`
```python
from llama_index.llms.everlyai import EverlyAI
llm = EverlyAI(api_key="your-api-key")
response = llm.complete("Hello World!")
print(response)
```
"""
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_key = get_from_param_or_env("api_key", api_key, "EverlyAI_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=EVERLYAI_API_BASE,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "EverlyAI_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=everlyai_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
)
@property
def _is_chat_model(self) -> bool:
return True
|
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.everlyai.utils import everlyai_modelname_to_contextsize
from llama_index.llms.openai import OpenAI
EVERLYAI_API_BASE = "https://everlyai.xyz/hosted"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class EverlyAI(OpenAI):
"""EverlyAI LLM.
Examples:
`pip install llama-index-llms-everlyai`
```python
from llama_index.llms.everlyai import EverlyAI
llm = EverlyAI(api_key="your-api-key")
response = llm.complete("Hello World!")
print(response)
```
"""
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_key = get_from_param_or_env("api_key", api_key, "EverlyAI_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=EVERLYAI_API_BASE,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "EverlyAI_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=everlyai_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
)
@property
def _is_chat_model(self) -> bool:
return True
|
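Besides the constructor example in the docstring, the key can also come from the environment, since `get_from_param_or_env` falls back to the `EverlyAI_API_KEY` variable. A small sketch with a placeholder key:

```python
import os

os.environ["EverlyAI_API_KEY"] = "sk-..."  # placeholder, not a real key

from llama_index.llms.everlyai import EverlyAI

llm = EverlyAI()  # api_key resolved from the environment
print(llm.metadata.context_window, llm.metadata.model_name)
```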
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import IS_THREAD_SAFE
from keras.src.backend.numpy.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import device_scope
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import IS_THREAD_SAFE
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import device_scope
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM)
    2. Applies a sparse transformation to the logits, log(1 + activation(MLM_logits))
3. Applies a pooling strategy (max or sum) to produce sparse embeddings
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): The pooling strategy to use, either "max" or "sum".
"max" takes the maximum value across all tokens.
"sum" adds the values across all tokens.
        activation_function (str): The activation function to use, either "relu" or "log1p_relu".
            "relu" computes log(1 + ReLU(x)).
            "log1p_relu" computes log(1 + log(1 + ReLU(x))).
"""
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
def __init__(
self, pooling_strategy: str = "max", activation_function="relu", word_embedding_dimension: int = None
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.activation_function = activation_function
if activation_function not in self.SPLADE_ACTIVATION:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
self.config_keys = ["pooling_strategy", "activation_function", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
            features: Dictionary containing the MLM logits under the 'token_embeddings' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
if self.activation_function == "relu":
splade_scores = torch.log1p(torch.relu(mlm_logits))
elif self.activation_function == "log1p_relu":
splade_scores = torch.log1p(torch.log1p(torch.relu(mlm_logits)))
else:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM)
    2. Applies a sparse transformation, log(1 + ReLU(MLM logits))
3. Applies a pooling strategy (max or sum) to produce sparse embeddings
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): The pooling strategy to use, either "max" or "sum".
"max" takes the maximum value across all tokens.
"sum" adds the values across all tokens.
"""
SPLADE_POOLING_MODES = ("sum", "max")
def __init__(self, pooling_strategy: str = "max", word_embedding_dimension: int = None) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
            features: Dictionary containing the MLM logits under the 'token_embeddings' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
|
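A quick sanity-check sketch of the pooling step using random logits in place of a real MLM head; shapes are illustrative and the import path is an assumption (the class is the `SpladePooling` defined above):

```python
import torch

from sentence_transformers.sparse_encoder.models import SpladePooling  # assumed path

# batch of 2 sequences, 4 tokens each, vocabulary of 10 entries
dummy = {"token_embeddings": torch.randn(2, 4, 10)}

pooling = SpladePooling(pooling_strategy="max")
out = pooling(dummy)

print(out["sentence_embedding"].shape)             # torch.Size([2, 10])
print(pooling.get_sentence_embedding_dimension())  # 10, set during forward()
```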
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2ndarray,
multi_apply, select_single_mlvl, unmap)
from .typing import (ConfigType, ForwardResults, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptSampleList, OptSamplingResultList, SampleList,
SamplingResultList)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed', 'levels_to_images',
'ConfigType', 'OptConfigType', 'MultiConfig', 'OptMultiConfig',
'InstanceList', 'OptInstanceList', 'SampleList', 'OptSampleList',
'SamplingResultList', 'ForwardResults', 'OptSamplingResultList'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2ndarray,
multi_apply, select_single_mlvl, stack_batch, unmap)
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptSampleList,
OptSamplingResultList, SampleList, SamplingResultList)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed', 'stack_batch',
'levels_to_images', 'ConfigType', 'OptConfigType', 'MultiConfig',
'OptMultiConfig', 'InstanceList', 'OptInstanceList', 'SampleList',
'OptSampleList', 'SamplingResultList', 'OptSamplingResultList'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleScholarQueryRun": "langchain_community.tools.google_scholar.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleScholarQueryRun",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleScholarQueryRun": "langchain_community.tools.google_scholar.tool"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleScholarQueryRun",
]
|
_base_ = './ga-retinanet_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
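Both variants only override the backbone depth and init checkpoint; everything else comes from the `_base_` file. A sketch of inspecting the merged result with mmengine (the file path is hypothetical):

```python
from mmengine.config import Config

# Hypothetical path to the config shown above.
cfg = Config.fromfile(
    'configs/guided_anchoring/ga-retinanet_r101-caffe_fpn_1x_coco.py')

print(cfg.model.backbone.depth)                # 101, from the override
print(cfg.model.backbone.init_cfg.checkpoint)  # open-mmlab://detectron2/resnet101_caffe
```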
"""Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:
from cassandra.cluster import Session
from cassio.table.table_types import RowType
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_TTL_SECONDS = None
def _rows_to_messages(rows: Iterable[RowType]) -> List[BaseMessage]:
message_blobs = [row["body_blob"] for row in rows][::-1]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that is backed by Cassandra."""
def __init__(
self,
session_id: str,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = DEFAULT_TTL_SECONDS,
*,
setup_mode: SetupMode = SetupMode.SYNC,
) -> None:
"""
Initialize a new instance of CassandraChatMessageHistory.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
keyspace: Cassandra key space. If not provided, it is resolved from cassio.
table_name: name of the table to use.
ttl_seconds: time-to-live (seconds) for automatic expiration
of stored entries. None (default) for no expiration.
setup_mode: mode used to create the Cassandra table (SYNC, ASYNC or OFF).
"""
try:
from cassio.table import ClusteredCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session_id = session_id
self.ttl_seconds = ttl_seconds
kwargs: Dict[str, Any] = {}
if setup_mode == SetupMode.ASYNC:
kwargs["async_setup"] = True
self.table = ClusteredCassandraTable(
session=session,
keyspace=keyspace,
table=table_name,
ttl_seconds=ttl_seconds,
primary_key_type=["TEXT", "TIMEUUID"],
ordering_in_partition="DESC",
skip_provisioning=setup_mode == SetupMode.OFF,
**kwargs,
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = self.table.get_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
async def aget_messages(self) -> List[BaseMessage]:
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = await self.table.aget_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
def add_message(self, message: BaseMessage) -> None:
"""Write a message to the table
Args:
message: A message to write.
"""
this_row_id = uuid.uuid4()
self.table.put(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
for message in messages:
this_row_id = uuid.uuid4()
await self.table.aput(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
def clear(self) -> None:
"""Clear session memory from DB"""
self.table.delete_partition(self.session_id)
async def aclear(self) -> None:
"""Clear session memory from DB"""
await self.table.adelete_partition(self.session_id)
|
"""Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:
from cassandra.cluster import Session
from cassio.table.table_types import RowType
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_TTL_SECONDS = None
def _rows_to_messages(rows: Iterable[RowType]) -> List[BaseMessage]:
message_blobs = [row["body_blob"] for row in rows][::-1]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that is backed by Cassandra."""
def __init__(
self,
session_id: str,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = DEFAULT_TTL_SECONDS,
*,
setup_mode: SetupMode = SetupMode.SYNC,
) -> None:
"""
Initialize a new instance of CassandraChatMessageHistory.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
keyspace: Cassandra key space. If not provided, it is resolved from cassio.
table_name: name of the table to use.
ttl_seconds: time-to-live (seconds) for automatic expiration
of stored entries. None (default) for no expiration.
setup_mode: mode used to create the Cassandra table (SYNC, ASYNC or OFF).
"""
try:
from cassio.table import ClusteredCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session_id = session_id
self.ttl_seconds = ttl_seconds
kwargs: Dict[str, Any] = {}
if setup_mode == SetupMode.ASYNC:
kwargs["async_setup"] = True
self.table = ClusteredCassandraTable(
session=session,
keyspace=keyspace,
table=table_name,
ttl_seconds=ttl_seconds,
primary_key_type=["TEXT", "TIMEUUID"],
ordering_in_partition="DESC",
skip_provisioning=setup_mode == SetupMode.OFF,
**kwargs,
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = self.table.get_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
async def aget_messages(self) -> List[BaseMessage]:
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = await self.table.aget_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
def add_message(self, message: BaseMessage) -> None:
"""Write a message to the table
Args:
message: A message to write.
"""
this_row_id = uuid.uuid1()
self.table.put(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
for message in messages:
this_row_id = uuid.uuid1()
await self.table.aput(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
def clear(self) -> None:
"""Clear session memory from DB"""
self.table.delete_partition(self.session_id)
async def aclear(self) -> None:
"""Clear session memory from DB"""
await self.table.adelete_partition(self.session_id)
|
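A minimal end-to-end sketch; the `cassio.init` credentials are placeholders, and the table is provisioned on first use according to `setup_mode`:

```python
import cassio
from langchain_community.chat_message_histories import CassandraChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

cassio.init(token="AstraCS:...", database_id="...")  # placeholder credentials

history = CassandraChatMessageHistory(
    session_id="user-42",
    table_name="message_store",
    ttl_seconds=3600,  # stored messages expire after an hour
)
history.add_message(HumanMessage(content="Hello!"))
history.add_message(AIMessage(content="Hi, how can I help?"))
print([m.content for m in history.messages])
```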
_base_ = './mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
depths=[2, 2, 18, 2],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
|
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
depths=[2, 2, 18, 2],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from torch.distributed.tensor._op_schema import (
OpSchema,
OpSpec,
OpStrategy,
StrategyType,
)
from torch.distributed.tensor._ops.utils import is_tensor_partial, register_op_strategy
aten = torch.ops.aten
@register_op_strategy(
[
aten.normal_.default,
aten.uniform_.default,
aten.native_dropout.default,
aten.bernoulli_.float,
aten.bernoulli.default,
]
)
def random_op_strategy(op_schema: OpSchema) -> StrategyType:
self_strategy = op_schema.args_schema[0]
assert isinstance(self_strategy, OpStrategy)
random_strategy = OpStrategy([])
for arg_strategy in self_strategy.strategies:
arg_spec = arg_strategy.output_spec
if is_tensor_partial(arg_spec):
# TODO: figure out how inplace random op should behave when it's partial
raise RuntimeError(f"{op_schema.op} with Partial is not supported yet!")
random_strategy.strategies.append(
OpSpec(
output_specs=arg_spec,
input_specs=(arg_spec,),
redistribute_cost=[[0.0] * len(self_strategy.strategies)],
)
)
return random_strategy
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from torch.distributed.tensor._op_schema import (
OpSchema,
OpSpec,
OpStrategy,
StrategyType,
)
from torch.distributed.tensor._ops.utils import is_tensor_partial, register_op_strategy
aten = torch.ops.aten
@register_op_strategy(
[
aten.normal_.default,
aten.uniform_.default,
aten.native_dropout.default,
aten.bernoulli_.float,
aten.bernoulli.default,
]
)
def random_op_strategy(op_schema: OpSchema) -> StrategyType:
self_strategy = op_schema.args_schema[0]
assert isinstance(self_strategy, OpStrategy)
random_strategy = OpStrategy([])
for arg_strategy in self_strategy.strategies:
arg_spec = arg_strategy.output_spec
if is_tensor_partial(arg_spec):
# TODO: figure out how inplace random op should behave when it's partial
raise RuntimeError(f"{op_schema.op} with Partial is not supported yet!")
random_strategy.strategies.append(OpSpec(output_specs=arg_spec))
return random_strategy
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.distributed as torch_dist
import torch.multiprocessing as mp
import mmengine.dist as dist
def _test_get_backend_non_dist():
assert dist.get_backend() is None
def _test_get_world_size_non_dist():
assert dist.get_world_size() == 1
def _test_get_rank_non_dist():
assert dist.get_rank() == 0
def _test_local_size_non_dist():
assert dist.get_local_size() == 1
def _test_local_rank_non_dist():
assert dist.get_local_rank() == 0
def _test_get_dist_info_non_dist():
assert dist.get_dist_info() == (0, 1)
def _test_is_main_process_non_dist():
assert dist.is_main_process()
def _test_master_only_non_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def _test_barrier_non_dist():
dist.barrier() # nothing is done
def init_process(rank, world_size, functions, backend='gloo'):
"""Initialize the distributed environment."""
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29501'
os.environ['RANK'] = str(rank)
if backend == 'nccl':
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
torch_dist.init_process_group(
backend=backend, rank=rank, world_size=world_size)
dist.init_local_group(0, world_size)
for func in functions:
func()
def main(functions, world_size=2, backend='gloo'):
try:
mp.spawn(
init_process,
args=(world_size, functions, backend),
nprocs=world_size)
except Exception:
pytest.fail('error')
def _test_get_backend_dist():
assert dist.get_backend() == torch_dist.get_backend()
def _test_get_world_size_dist():
assert dist.get_world_size() == 2
def _test_get_rank_dist():
if torch_dist.get_rank() == 0:
assert dist.get_rank() == 0
else:
assert dist.get_rank() == 1
def _test_local_size_dist():
assert dist.get_local_size() == 2
def _test_local_rank_dist():
    assert torch_dist.get_rank(dist.get_local_group()) == dist.get_local_rank()
def _test_get_dist_info_dist():
if dist.get_rank() == 0:
assert dist.get_dist_info() == (0, 2)
else:
assert dist.get_dist_info() == (1, 2)
def _test_is_main_process_dist():
if dist.get_rank() == 0:
assert dist.is_main_process()
else:
assert not dist.is_main_process()
def _test_master_only_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def test_non_distributed_env():
_test_get_backend_non_dist()
_test_get_world_size_non_dist()
_test_get_rank_non_dist()
_test_local_size_non_dist()
_test_local_rank_non_dist()
_test_get_dist_info_non_dist()
_test_is_main_process_non_dist()
_test_master_only_non_dist()
_test_barrier_non_dist()
functions_to_test = [
_test_get_backend_dist,
_test_get_world_size_dist,
_test_get_rank_dist,
_test_local_size_dist,
_test_local_rank_dist,
_test_get_dist_info_dist,
_test_is_main_process_dist,
_test_master_only_dist,
]
def test_gloo_backend():
main(functions_to_test)
@pytest.mark.skipif(
torch.cuda.device_count() < 2, reason='need 2 gpu to test nccl')
def test_nccl_backend():
main(functions_to_test, backend='nccl')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.distributed as torch_dist
import torch.multiprocessing as mp
import mmengine.dist as dist
def _test_get_backend_non_dist():
assert dist.get_backend() is None
def _test_get_world_size_non_dist():
assert dist.get_world_size() == 1
def _test_get_rank_non_dist():
assert dist.get_rank() == 0
def _test_local_size_non_dist():
assert dist.get_local_size() == 1
def _test_local_rank_non_dist():
assert dist.get_local_rank() == 0
def _test_get_dist_info_non_dist():
assert dist.get_dist_info() == (0, 1)
def _test_is_main_process_non_dist():
assert dist.is_main_process()
def _test_master_only_non_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def _test_barrier_non_dist():
dist.barrier() # nothing is done
def init_process(rank, world_size, functions, backend='gloo'):
"""Initialize the distributed environment."""
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29501'
os.environ['RANK'] = str(rank)
dist.init_dist('pytorch', backend, rank=rank, world_size=world_size)
dist.init_local_group(0, world_size)
for func in functions:
func()
def main(functions, world_size=2, backend='gloo'):
try:
mp.spawn(
init_process,
args=(world_size, functions, backend),
nprocs=world_size)
except Exception:
pytest.fail('error')
def _test_get_backend_dist():
assert dist.get_backend() == torch_dist.get_backend()
def _test_get_world_size_dist():
assert dist.get_world_size() == 2
def _test_get_rank_dist():
if torch_dist.get_rank() == 0:
assert dist.get_rank() == 0
else:
assert dist.get_rank() == 1
def _test_local_size_dist():
assert dist.get_local_size() == 2
def _test_local_rank_dist():
    assert torch_dist.get_rank(dist.get_local_group()) == dist.get_local_rank()
def _test_get_dist_info_dist():
if dist.get_rank() == 0:
assert dist.get_dist_info() == (0, 2)
else:
assert dist.get_dist_info() == (1, 2)
def _test_is_main_process_dist():
if dist.get_rank() == 0:
assert dist.is_main_process()
else:
assert not dist.is_main_process()
def _test_master_only_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def test_non_distributed_env():
_test_get_backend_non_dist()
_test_get_world_size_non_dist()
_test_get_rank_non_dist()
_test_local_size_non_dist()
_test_local_rank_non_dist()
_test_get_dist_info_non_dist()
_test_is_main_process_non_dist()
_test_master_only_non_dist()
_test_barrier_non_dist()
functions_to_test = [
_test_get_backend_dist,
_test_get_world_size_dist,
_test_get_rank_dist,
_test_local_size_dist,
_test_local_rank_dist,
_test_get_dist_info_dist,
_test_is_main_process_dist,
_test_master_only_dist,
]
def test_gloo_backend():
main(functions_to_test)
@pytest.mark.skipif(
torch.cuda.device_count() < 2, reason='need 2 gpu to test nccl')
def test_nccl_backend():
main(functions_to_test, backend='nccl')
|
from xgboost import dask as dxgb
from xgboost import testing as tm
import dask.array as da
import dask.distributed
def train_result(client, param, dtrain, num_rounds):
result = dxgb.train(
client,
param,
dtrain,
num_rounds,
verbose_eval=False,
evals=[(dtrain, "train")],
)
return result
class TestSYCLDask:
    # The simplest test verifies single-node training only.
def test_simple(self):
cluster = dask.distributed.LocalCluster(n_workers=1)
client = dask.distributed.Client(cluster)
param = {}
param["tree_method"] = "hist"
param["device"] = "sycl"
param["verbosity"] = 0
param["objective"] = "reg:squarederror"
# X and y must be Dask dataframes or arrays
num_obs = int(1e4)
num_features = 20
rng = da.random.RandomState(1994)
X = rng.random_sample((num_obs, num_features), chunks=(1000, -1))
y = X.sum(axis=1)
dtrain = dxgb.DaskDMatrix(client, X, y)
result = train_result(client, param, dtrain, 10)
assert tm.non_increasing(result["history"]["train"]["rmse"])
|
from xgboost import dask as dxgb
from xgboost import testing as tm
from hypothesis import given, strategies, assume, settings, note
import dask.array as da
import dask.distributed
def train_result(client, param, dtrain, num_rounds):
result = dxgb.train(
client,
param,
dtrain,
num_rounds,
verbose_eval=False,
evals=[(dtrain, "train")],
)
return result
class TestSYCLDask:
    # The simplest test verifies single-node training only.
def test_simple(self):
cluster = dask.distributed.LocalCluster(n_workers=1)
client = dask.distributed.Client(cluster)
param = {}
param["tree_method"] = "hist"
param["device"] = "sycl"
param["verbosity"] = 0
param["objective"] = "reg:squarederror"
# X and y must be Dask dataframes or arrays
        num_obs = int(1e4)
num_features = 20
X = da.random.random(size=(num_obs, num_features), chunks=(1000, num_features))
y = da.random.random(size=(num_obs, 1), chunks=(1000, 1))
dtrain = dxgb.DaskDMatrix(client, X, y)
result = train_result(client, param, dtrain, 10)
assert tm.non_increasing(result["history"]["train"]["rmse"])
|
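The tests above leave the cluster and client open; outside a test harness the same single-worker run is usually wrapped in context managers so everything shuts down cleanly. A sketch under those assumptions (not part of the test suite):

```python
import dask.array as da
import dask.distributed

from xgboost import dask as dxgb

with dask.distributed.LocalCluster(n_workers=1) as cluster:
    with dask.distributed.Client(cluster) as client:
        X = da.random.random((10_000, 20), chunks=(1_000, 20))
        y = X.sum(axis=1)
        dtrain = dxgb.DaskDMatrix(client, X, y)
        out = dxgb.train(
            client,
            {"tree_method": "hist", "objective": "reg:squarederror"},
            dtrain,
            num_boost_round=10,
            evals=[(dtrain, "train")],
        )
        print(out["history"]["train"]["rmse"][-1])
```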
"""
Collection of examples for using sklearn interface
==================================================
For an introduction to XGBoost's scikit-learn estimator interface, see
:doc:`/python/sklearn_estimator`.
Created on 1 Apr 2015
@author: Jamie Hall
"""
import pickle
import numpy as np
from sklearn.datasets import fetch_california_housing, load_digits, load_iris
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
import xgboost as xgb
rng = np.random.RandomState(31337)
print("Zeros and Ones from the Digits dataset: binary classification")
digits = load_digits(n_class=2)
y = digits["target"]
X = digits["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("Iris: multiclass classification")
iris = load_iris()
y = iris["target"]
X = iris["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("California Housing: regression")
X, y = fetch_california_housing(return_X_y=True)
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBRegressor(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(mean_squared_error(actuals, predictions))
print("Parameter optimization")
xgb_model = xgb.XGBRegressor(n_jobs=1)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4], "n_estimators": [50, 100]},
verbose=1,
n_jobs=1,
cv=3,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
# The sklearn API models are picklable
print("Pickling sklearn API models")
# must open in binary format to pickle
pickle.dump(clf, open("best_calif.pkl", "wb"))
clf2 = pickle.load(open("best_calif.pkl", "rb"))
print(np.allclose(clf.predict(X), clf2.predict(X)))
# Early-stopping
X = digits["data"]
y = digits["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = xgb.XGBClassifier(n_jobs=1, early_stopping_rounds=10, eval_metric="auc")
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
|
"""
Collection of examples for using sklearn interface
==================================================
For an introduction to XGBoost's scikit-learn estimator interface, see
:doc:`/python/sklearn_estimator`.
Created on 1 Apr 2015
@author: Jamie Hall
"""
import pickle
import numpy as np
from sklearn.datasets import fetch_california_housing, load_digits, load_iris
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
import xgboost as xgb
rng = np.random.RandomState(31337)
print("Zeros and Ones from the Digits dataset: binary classification")
digits = load_digits(n_class=2)
y = digits["target"]
X = digits["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("Iris: multiclass classification")
iris = load_iris()
y = iris["target"]
X = iris["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("California Housing: regression")
X, y = fetch_california_housing(return_X_y=True)
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBRegressor(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(mean_squared_error(actuals, predictions))
print("Parameter optimization")
xgb_model = xgb.XGBRegressor(n_jobs=1)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4], "n_estimators": [50, 100]},
verbose=1,
n_jobs=1,
cv=3,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
# The sklearn API models are picklable
print("Pickling sklearn API models")
# must open in binary format to pickle
pickle.dump(clf, open("best_calif.pkl", "wb"))
clf2 = pickle.load(open("best_calif.pkl", "rb"))
print(np.allclose(clf.predict(X), clf2.predict(X)))
# Early-stopping
X = digits["data"]
y = digits["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = xgb.XGBClassifier(n_jobs=1, early_stopping_rounds=10, eval_metric="auc")
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
|
from typing import Any, Dict, List, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class OracleAutonomousDatabaseLoader(BaseLoader):
"""
    Load documents from Oracle Autonomous Database (ADB).
    The connection can be made either with a connection_string or with a TNS
    name; wallet_location and wallet_password are required for mTLS
    connections.
    Each document represents one row of the query result: every column is
    written into `page_content`, and the columns listed in the constructor's
    `metadata` argument are copied into the document's `metadata`
    (None by default).
"""
def __init__(
self,
query: str,
user: str,
password: str,
*,
schema: Optional[str] = None,
tns_name: Optional[str] = None,
config_dir: Optional[str] = None,
wallet_location: Optional[str] = None,
wallet_password: Optional[str] = None,
connection_string: Optional[str] = None,
metadata: Optional[List[str]] = None,
parameters: Optional[Union[list, tuple, dict]] = None,
):
"""
init method
:param query: sql query to execute
:param user: username
:param password: user password
:param schema: schema to run in database
:param tns_name: tns name in tnsname.ora
        :param config_dir: directory of config files (tnsname.ora, wallet)
:param wallet_location: location of wallet
:param wallet_password: password of wallet
:param connection_string: connection string to connect to adb instance
:param metadata: metadata used in document
        :param parameters: bind variables to use in the query
"""
# Mandatory required arguments.
self.query = query
self.user = user
self.password = password
# Schema
self.schema = schema
# TNS connection Method
self.tns_name = tns_name
self.config_dir = config_dir
# Wallet configuration is required for mTLS connection
self.wallet_location = wallet_location
self.wallet_password = wallet_password
# Connection String connection method
self.connection_string = connection_string
# metadata column
self.metadata = metadata
# parameters, e.g bind variable
self.parameters = parameters
# dsn
self.dsn: Optional[str]
self._set_dsn()
def _set_dsn(self) -> None:
if self.connection_string:
self.dsn = self.connection_string
elif self.tns_name:
self.dsn = self.tns_name
def _run_query(self) -> List[Dict[str, Any]]:
try:
import oracledb
except ImportError as e:
raise ImportError(
"Could not import oracledb, please install with 'pip install oracledb'"
) from e
connect_param = {"user": self.user, "password": self.password, "dsn": self.dsn}
if self.dsn == self.tns_name:
connect_param["config_dir"] = self.config_dir
if self.wallet_location and self.wallet_password:
connect_param["wallet_location"] = self.wallet_location
connect_param["wallet_password"] = self.wallet_password
try:
connection = oracledb.connect(**connect_param)
cursor = connection.cursor()
if self.schema:
cursor.execute(f"alter session set current_schema={self.schema}")
if self.parameters:
cursor.execute(self.query, self.parameters)
else:
cursor.execute(self.query)
columns = [col[0] for col in cursor.description]
data = cursor.fetchall()
data = [
{
i: (j if not isinstance(j, oracledb.LOB) else j.read())
for i, j in zip(columns, row)
}
for row in data
]
except oracledb.DatabaseError as e:
print("Got error while connecting: " + str(e)) # noqa: T201
data = []
finally:
cursor.close()
connection.close()
return data
def load(self) -> List[Document]:
data = self._run_query()
documents = []
metadata_columns = self.metadata if self.metadata else []
for row in data:
metadata = {
key: value for key, value in row.items() if key in metadata_columns
}
doc = Document(page_content=str(row), metadata=metadata)
documents.append(doc)
return documents
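# Hedged usage sketch for the loader above. Query, credentials, connection
# string, and column names are placeholders, not real values; `oracledb` must
# be installed and an ADB instance reachable for this to actually run.
if __name__ == "__main__":
    loader = OracleAutonomousDatabaseLoader(
        query="SELECT id, title, body FROM articles",  # hypothetical table
        user="my_user",  # placeholder credentials
        password="my_password",
        connection_string="my_adb_high",  # placeholder connect string / DSN
        metadata=["ID", "TITLE"],  # columns copied into Document.metadata
    )
    docs = loader.load()
    for doc in docs[:3]:
        print(doc.metadata, doc.page_content[:80])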
|
from typing import Any, Dict, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class OracleAutonomousDatabaseLoader(BaseLoader):
"""
    Load documents from Oracle Autonomous Database (ADB).
    A connection to Autonomous Database can be made with either a connection_string
    or a TNS name. wallet_location and wallet_password are required
    for a TLS connection.
    Each document represents one row of the query result.
    All columns are written into `page_content`; the columns listed in the
    constructor's 'metadata' argument are copied into the document's 'metadata'.
    By default, 'metadata' is None.
"""
def __init__(
self,
query: str,
user: str,
password: str,
*,
schema: Optional[str] = None,
tns_name: Optional[str] = None,
config_dir: Optional[str] = None,
wallet_location: Optional[str] = None,
wallet_password: Optional[str] = None,
connection_string: Optional[str] = None,
metadata: Optional[List[str]] = None,
):
"""
init method
:param query: sql query to execute
:param user: username
:param password: user password
:param schema: schema to run in database
:param tns_name: tns name in tnsname.ora
        :param config_dir: directory of config files (tnsname.ora, wallet)
:param wallet_location: location of wallet
:param wallet_password: password of wallet
:param connection_string: connection string to connect to adb instance
:param metadata: metadata used in document
"""
# Mandatory required arguments.
self.query = query
self.user = user
self.password = password
# Schema
self.schema = schema
# TNS connection Method
self.tns_name = tns_name
self.config_dir = config_dir
# Wallet configuration is required for mTLS connection
self.wallet_location = wallet_location
self.wallet_password = wallet_password
# Connection String connection method
self.connection_string = connection_string
# metadata column
self.metadata = metadata
# dsn
self.dsn: Optional[str]
self._set_dsn()
def _set_dsn(self) -> None:
if self.connection_string:
self.dsn = self.connection_string
elif self.tns_name:
self.dsn = self.tns_name
def _run_query(self) -> List[Dict[str, Any]]:
try:
import oracledb
except ImportError as e:
raise ImportError(
"Could not import oracledb, please install with 'pip install oracledb'"
) from e
connect_param = {"user": self.user, "password": self.password, "dsn": self.dsn}
if self.dsn == self.tns_name:
connect_param["config_dir"] = self.config_dir
if self.wallet_location and self.wallet_password:
connect_param["wallet_location"] = self.wallet_location
connect_param["wallet_password"] = self.wallet_password
try:
connection = oracledb.connect(**connect_param)
cursor = connection.cursor()
if self.schema:
cursor.execute(f"alter session set current_schema={self.schema}")
cursor.execute(self.query)
columns = [col[0] for col in cursor.description]
data = cursor.fetchall()
data = [
{
i: (j if not isinstance(j, oracledb.LOB) else j.read())
for i, j in zip(columns, row)
}
for row in data
]
except oracledb.DatabaseError as e:
print("Got error while connecting: " + str(e)) # noqa: T201
data = []
finally:
cursor.close()
connection.close()
return data
def load(self) -> List[Document]:
data = self._run_query()
documents = []
metadata_columns = self.metadata if self.metadata else []
for row in data:
metadata = {
key: value for key, value in row.items() if key in metadata_columns
}
doc = Document(page_content=str(row), metadata=metadata)
documents.append(doc)
return documents
|
from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.CompositeTensor):
# Check for ExtensionTypes
spec = t._type_spec
elif hasattr(t, "shape") and hasattr(t, "dtype"):
spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
shape = spec.shape
if shape.rank is None or shape.rank == 0:
return spec
shape_list = shape.as_list()
shape_list[0] = None
shape = tf.TensorShape(shape_list)
spec._shape = shape
return spec
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if backend.backend() == "torch" and backend.is_tensor(inputs):
# Plain `np.asarray()` conversion fails with PyTorch.
inputs = backend.convert_to_numpy(inputs)
inputs = tf.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def is_ragged_tensor(x):
return "ragged_tensor.RaggedTensor" in str(type(x))
|
from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.CompositeTensor):
# Check for ExtensionTypes
spec = t._type_spec
elif hasattr(t, "shape") and hasattr(t, "dtype"):
spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
shape = spec.shape
if shape.rank is None or shape.rank == 0:
return spec
shape_list = shape.as_list()
shape_list[0] = None
shape = tf.TensorShape(shape_list)
spec._shape = shape
return spec
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if backend.backend() == "torch" and backend.is_tensor(inputs):
# Plain `np.asarray()` conversion fails with PyTorch.
inputs = backend.convert_to_numpy(inputs)
inputs = tf.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmdet.registry import MODELS
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if self.with_sac:
self.conv2 = build_conv_layer(
self.sac,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
elif not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
@MODELS.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
"""ResNeXt backbone for DetectoRS.
Args:
groups (int): The number of groups in ResNeXt.
base_width (int): The base width of ResNeXt.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
self.groups = groups
self.base_width = base_width
super(DetectoRS_ResNeXt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
return super().make_res_layer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
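# Hedged sketch: how this backbone is typically selected in an MMDetection-style
# config dict. Keys such as depth/num_stages/out_indices come from the ResNet
# base classes, and the SAC/ConvAWS settings follow common DetectoRS configs;
# treat all values below as illustrative, not a verified recipe.
backbone = dict(
    type='DetectoRS_ResNeXt',
    depth=101,
    groups=32,
    base_width=4,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    frozen_stages=1,
    norm_cfg=dict(type='BN', requires_grad=True),
    conv_cfg=dict(type='ConvAWS'),
    sac=dict(type='SAC', use_deform=True),
    stage_with_sac=(False, True, True, True),
)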
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if self.with_sac:
self.conv2 = build_conv_layer(
self.sac,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
elif not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
@BACKBONES.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
"""ResNeXt backbone for DetectoRS.
Args:
groups (int): The number of groups in ResNeXt.
base_width (int): The base width of ResNeXt.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
self.groups = groups
self.base_width = base_width
super(DetectoRS_ResNeXt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
return super().make_res_layer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
|
"""
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulls the pr, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want to bother them.
"""
import json
import os
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: io",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"tutorial",
"recipe",
"example",
"build",
"style",
"perf",
"other",
}
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
REQUEST_HEADERS = {"Accept": "application/vnd.github.v3+json", "Authorization": f"token {GITHUB_TOKEN}"}
TORCHAUDIO_REPO = "https://api.github.com/repos/pytorch/audio"
def query_torchaudio(cmd: str) -> Any:
response = requests.get(f"{TORCHAUDIO_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
def get_pr_merger_and_number(commit_hash: str) -> Tuple[Optional[str], Optional[str]]:
data = query_torchaudio(f"commits/{commit_hash}")
commit_message = data["commit"]["message"]
pulled_by = commit_message.split("Pulled By: ")
pulled_by = pulled_by[1].split("\n")[0] if len(pulled_by) > 1 else None
pr_number = commit_message.split("Pull Request resolved: https://github.com/pytorch/audio/pull/")
pr_number = pr_number[1].split("\n")[0] if len(pr_number) > 1 else None
return pulled_by, pr_number
def get_labels(pr_number: int) -> Set[str]:
data = query_torchaudio(f"pulls/{pr_number}")
labels = {label["name"] for label in data["labels"]}
return labels
def post_github_comment(pr_number: int, merger: str) -> Any:
message = {
"body": f"Hey @{merger}."
+ """
You merged this PR, but labels were not properly added. Please add a primary and secondary label \
(See https://github.com/pytorch/audio/blob/main/.github/process_commit.py).
---
## Some guidance:
Use 'module: ops' for operations under 'torchaudio/{transforms, functional}', \
and ML-related components under 'torchaudio/csrc' (e.g. RNN-T loss).
Things in "examples" directory:
- 'recipe' is applicable to training recipes under the 'examples' folder,
- 'tutorial' is applicable to tutorials under the “examples/tutorials” folder
- 'example' is applicable to everything else (e.g. C++ examples)
- 'module: docs' is applicable to code documentation (not to tutorials). \
Regarding examples inside code documentation, please also use 'module: docs'.
Please use the 'other' tag only when you’re sure the changes are not very relevant to users, \
or when no other tag is applicable. Try not to use it often, in order to minimize \
the effort required when we prepare release notes.
---
When preparing release notes, please make sure 'documentation' and 'tutorials' occur as the \
last sub-categories under each primary category like 'new feature', 'improvements' or 'prototype'.
Things related to build are by default excluded from the release note, \
except when it impacts users. For example:
* Drop support of Python 3.7.
* Add support of Python 3.X.
* Change the way a third-party library is bound (so that the user needs to install it separately).
"""
}
response = requests.post(
f"{TORCHAUDIO_REPO}/issues/{pr_number}/comments", json.dumps(message), headers=REQUEST_HEADERS
)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
merger, pr_number = get_pr_merger_and_number(commit_hash)
if pr_number:
labels = get_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
post_github_comment(pr_number, merger)
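# Tiny, hedged sketch of the labeling rule enforced above: a PR counts as
# properly labeled only when it carries at least one primary and one secondary
# label. The label sets below are made-up examples, not real PR data.
example_ok = {"bug fix", "module: io"}
example_missing_primary = {"module: io", "perf"}
print(bool(PRIMARY_LABELS & example_ok and SECONDARY_LABELS & example_ok))  # True
print(bool(PRIMARY_LABELS & example_missing_primary and SECONDARY_LABELS & example_missing_primary))  # False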
|
"""
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulls the pr, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want to bother them.
"""
import json
import os
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: io",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"tutorial",
"recipe",
"example",
"build",
"style",
"perf",
"other",
}
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
REQUEST_HEADERS = {"Accept": "application/vnd.github.v3+json", "Authorization": f"token {GITHUB_TOKEN}"}
TORCHAUDIO_REPO = "https://api.github.com/repos/pytorch/audio"
def query_torchaudio(cmd: str) -> Any:
response = requests.get(f"{TORCHAUDIO_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
def get_pr_merger_and_number(commit_hash: str) -> Tuple[Optional[str], Optional[str]]:
data = query_torchaudio(f"commits/{commit_hash}")
commit_message = data["commit"]["message"]
pulled_by = commit_message.split("Pulled By: ")
pulled_by = pulled_by[1].split("\n")[0] if len(pulled_by) > 1 else None
pr_number = commit_message.split("Pull Request resolved: https://github.com/pytorch/audio/pull/")
pr_number = pr_number[1].split("\n")[0] if len(pr_number) > 1 else None
return pulled_by, pr_number
def get_labels(pr_number: int) -> Set[str]:
data = query_torchaudio(f"pulls/{pr_number}")
labels = {label["name"] for label in data["labels"]}
return labels
def post_github_comment(pr_number: int, merger: str) -> Any:
message = {
"body": f"Hey @{merger}."
+ """
You merged this PR, but labels were not properly added. Please add a primary and secondary label \
(See https://github.com/pytorch/audio/blob/main/.github/process_commit.py).
Some guidance:
Use 'module: ops' for operations under 'torchaudio/{transforms, functional}', \
and ML-related components under 'torchaudio/csrc' (e.g. RNN-T loss).
Things in "examples" directory:
'recipe' is applicable to training recipes under the 'examples' folder,
'tutorial' is applicable to tutorials under the “examples/tutorials” folder
'example' is applicable to everything else (e.g. C++ examples)
'module: docs' is applicable to code documentation (not to tutorials). \
Regarding examples inside code documentation, please also use 'module: docs'.
Please use the 'other' tag only when you’re sure the changes are not very relevant to users, \
or when no other tag is applicable. Try not to use it often, in order to minimize \
the effort required when we prepare release notes.
When preparing release notes, please make sure 'documentation' and 'tutorials' occur as the \
last sub-categories under each primary category like 'new feature', 'improvements' or 'prototype'.
Things related to build are by default excluded from the release note, except when it impacts users.
For example:
* Drop support of Python 3.7.
* Add support of Python 3.X.
* Change the way a third-party library is bound (so that the user needs to install it separately).
"""
}
response = requests.post(
f"{TORCHAUDIO_REPO}/issues/{pr_number}/comments", json.dumps(message), headers=REQUEST_HEADERS
)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
merger, pr_number = get_pr_merger_and_number(commit_hash)
if pr_number:
labels = get_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
post_github_comment(pr_number, merger)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
from tempfile import TemporaryDirectory
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
sys.modules['file_client'] = sys.modules['mmengine.fileio.file_client']
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__
@property
def allow_symlink(self):
return self._allow_symlink
prefix_to_backends = {'s3': MockPetrel}
class TestCheckpointHook:
@patch('file_client.FileClient._prefix_to_backends', prefix_to_backends)
def test_before_run(self):
runner = Mock()
runner.work_dir = './tmp'
# the out_dir of the checkpoint hook is None
checkpoint_hook = CheckpointHook(interval=1, by_epoch=True)
checkpoint_hook.before_run(runner)
assert checkpoint_hook.out_dir == runner.work_dir
# the out_dir of the checkpoint hook is not None
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir')
checkpoint_hook.before_run(runner)
assert checkpoint_hook.out_dir == 'test_dir/tmp'
# create_symlink in args and create_symlink is True
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir', create_symlink=True)
checkpoint_hook.before_run(runner)
assert checkpoint_hook.args['create_symlink']
runner.work_dir = 's3://path/of/file'
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, create_symlink=True)
checkpoint_hook.before_run(runner)
assert not checkpoint_hook.args['create_symlink']
def test_after_train_epoch(self):
runner = Mock()
runner.work_dir = './tmp'
runner.epoch = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/epoch_10.pth'
# epoch can not be evenly divided by 2
runner.epoch = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/epoch_10.pth'
# by epoch is False
runner.epoch = 9
runner.meta = dict()
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert runner.meta.get('hook_msgs', None) is None
# max_keep_ckpts > 0
with TemporaryDirectory() as tempo_dir:
runner.work_dir = tempo_dir
os.system(f'touch {tempo_dir}/epoch_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=True, max_keep_ckpts=1)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert not os.path.exists(f'{tempo_dir}/epoch_8.pth')
def test_after_train_iter(self):
runner = Mock()
runner.work_dir = './tmp'
runner.iter = 9
batch_idx = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert runner.meta.get('hook_msgs', None) is None
# by epoch is False
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert (runner.iter + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/iter_10.pth'
# epoch can not be evenly divided by 2
runner.iter = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/iter_10.pth'
# max_keep_ckpts > 0
runner.iter = 9
with TemporaryDirectory() as tempo_dir:
runner.work_dir = tempo_dir
os.system(f'touch {tempo_dir}/iter_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=False, max_keep_ckpts=1)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert not os.path.exists(f'{tempo_dir}/iter_8.pth')
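# Hedged sketch of how CheckpointHook is typically enabled outside of tests,
# via the `default_hooks` field of an MMEngine config; values are illustrative.
default_hooks = dict(
    checkpoint=dict(
        type='CheckpointHook', interval=2, by_epoch=True, max_keep_ckpts=1),
)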
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
from tempfile import TemporaryDirectory
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
sys.modules['file_client'] = sys.modules['mmengine.fileio.file_client']
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__
@property
def allow_symlink(self):
return self._allow_symlink
prefix_to_backends = {'s3': MockPetrel}
class TestCheckpointHook:
@patch('file_client.FileClient._prefix_to_backends', prefix_to_backends)
def test_before_run(self):
runner = Mock()
runner.work_dir = './tmp'
# the out_dir of the checkpoint hook is None
checkpoint_hook = CheckpointHook(interval=1, by_epoch=True)
checkpoint_hook.before_run(runner)
assert checkpoint_hook.out_dir == runner.work_dir
# the out_dir of the checkpoint hook is not None
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir')
checkpoint_hook.before_run(runner)
assert checkpoint_hook.out_dir == 'test_dir/tmp'
# create_symlink in args and create_symlink is True
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir', create_symlink=True)
checkpoint_hook.before_run(runner)
assert checkpoint_hook.args['create_symlink']
runner.work_dir = 's3://path/of/file'
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, create_symlink=True)
checkpoint_hook.before_run(runner)
assert not checkpoint_hook.args['create_symlink']
def test_after_train_epoch(self):
runner = Mock()
runner.work_dir = './tmp'
runner.epoch = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/epoch_10.pth'
# epoch can not be evenly divided by 2
runner.epoch = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/epoch_10.pth'
# by epoch is False
runner.epoch = 9
runner.meta = dict()
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert runner.meta.get('hook_msgs', None) is None
# max_keep_ckpts > 0
with TemporaryDirectory() as tempo_dir:
runner.work_dir = tempo_dir
os.system(f'touch {tempo_dir}/epoch_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=True, max_keep_ckpts=1)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert not os.path.exists(f'{tempo_dir}/epoch_8.pth')
def test_after_train_iter(self):
runner = Mock()
runner.work_dir = './tmp'
runner.iter = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner)
assert runner.meta.get('hook_msgs', None) is None
# by epoch is False
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner)
assert (runner.iter + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/iter_10.pth'
# epoch can not be evenly divided by 2
runner.iter = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/iter_10.pth'
# max_keep_ckpts > 0
runner.iter = 9
with TemporaryDirectory() as tempo_dir:
runner.work_dir = tempo_dir
os.system(f'touch {tempo_dir}/iter_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=False, max_keep_ckpts=1)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner)
assert not os.path.exists(f'{tempo_dir}/iter_8.pth')
|
from torchaudio._internal.module_utils import dropping_class_support
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AddNoise,
AmplitudeToDB,
ComputeDeltas,
Convolve,
Deemphasis,
Fade,
FFTConvolve,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Preemphasis,
Resample,
RNNTLoss as _RNNTLoss,
SlidingWindowCmn,
SpecAugment,
SpectralCentroid,
Spectrogram,
Speed,
SpeedPerturbation,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
RNNTLoss = dropping_class_support(_RNNTLoss)
__all__ = [
"AddNoise",
"AmplitudeToDB",
"ComputeDeltas",
"Convolve",
"Deemphasis",
"Fade",
"FFTConvolve",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"Preemphasis",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpecAugment",
"SpectralCentroid",
"Spectrogram",
"Speed",
"SpeedPerturbation",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
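# Hedged usage sketch for a couple of the transforms re-exported above.
# Shapes and parameter values are illustrative; torchaudio must be installed.
if __name__ == "__main__":
    import torch

    waveform = torch.randn(1, 16000)  # one second of fake mono audio at 16 kHz
    mel = MelSpectrogram(sample_rate=16000, n_mels=80)  # waveform -> mel spectrogram
    to_db = AmplitudeToDB()  # power spectrogram -> decibels
    features = to_db(mel(waveform))
    print(features.shape)  # expected: (1, 80, num_frames)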
|
from torchaudio._internal.module_utils import dropping_support
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AddNoise,
AmplitudeToDB,
ComputeDeltas,
Convolve,
Deemphasis,
Fade,
FFTConvolve,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Preemphasis,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpecAugment,
SpectralCentroid,
Spectrogram,
Speed,
SpeedPerturbation,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
RNNTLoss.__init__ = dropping_support(RNNTLoss.__init__)
__all__ = [
"AddNoise",
"AmplitudeToDB",
"ComputeDeltas",
"Convolve",
"Deemphasis",
"Fade",
"FFTConvolve",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"Preemphasis",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpecAugment",
"SpectralCentroid",
"Spectrogram",
"Speed",
"SpeedPerturbation",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
import json
import os
from typing import Dict
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: Dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
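# Hedged round-trip sketch for the module above: save to a temporary directory
# and load it back. The embedding dimension and batch size are arbitrary.
if __name__ == "__main__":
    import tempfile

    layer = LayerNorm(dimension=8)
    with tempfile.TemporaryDirectory() as tmp_dir:
        layer.save(tmp_dir)  # writes config.json + model.safetensors by default
        restored = LayerNorm.load(tmp_dir)
    out = restored({"sentence_embedding": torch.randn(2, 8)})
    print(out["sentence_embedding"].shape)  # expected: torch.Size([2, 8])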
|
import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: Dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook,
annealing_cos)
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
"""YOLOX learning rate scheme.
There are two main differences between YOLOXLrUpdaterHook
and CosineAnnealingLrUpdaterHook.
1. When the current running epoch is greater than
`max_epoch-last_epoch`, a fixed learning rate will be used
    2. The exp warmup scheme is different from the one used in LrUpdaterHook in MMCV
Args:
num_last_epochs (int): The number of epochs with a fixed learning rate
before the end of the training.
"""
def __init__(self, num_last_epochs, **kwargs):
self.num_last_epochs = num_last_epochs
super(YOLOXLrUpdaterHook, self).__init__(**kwargs)
def get_warmup_lr(self, cur_iters):
def _get_warmup_lr(cur_iters, regular_lr):
# exp warmup scheme
k = self.warmup_ratio * pow(
(cur_iters + 1) / float(self.warmup_iters), 2)
warmup_lr = [_lr * k for _lr in regular_lr]
return warmup_lr
if isinstance(self.base_lr, dict):
lr_groups = {}
for key, base_lr in self.base_lr.items():
lr_groups[key] = _get_warmup_lr(cur_iters, base_lr)
return lr_groups
else:
return _get_warmup_lr(cur_iters, self.base_lr)
def get_lr(self, runner, base_lr):
last_iter = len(runner.data_loader) * self.num_last_epochs
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
progress += 1
if self.min_lr_ratio is not None:
target_lr = base_lr * self.min_lr_ratio
else:
target_lr = self.min_lr
if progress >= max_progress - last_iter:
# fixed learning rate
return target_lr
else:
return annealing_cos(
base_lr, target_lr, (progress - self.warmup_iters) /
(max_progress - self.warmup_iters - last_iter))
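# Hedged numerical sketch of the exp warmup used in get_warmup_lr above:
# k = warmup_ratio * ((cur_iter + 1) / warmup_iters) ** 2, so the warmup LR grows
# quadratically from ~0 up to warmup_ratio * base_lr over `warmup_iters` iterations.
# The values below are illustrative only.
if __name__ == "__main__":
    warmup_ratio, warmup_iters, base_lr = 1.0, 1000, 0.01
    for cur_iter in (0, 499, 999):
        k = warmup_ratio * ((cur_iter + 1) / float(warmup_iters)) ** 2
        print(cur_iter, k * base_lr)  # approx 1e-08, 0.0025, 0.01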
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS
from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook,
annealing_cos)
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
"""YOLOX learning rate scheme.
There are two main differences between YOLOXLrUpdaterHook
and CosineAnnealingLrUpdaterHook.
1. When the current running epoch is greater than
`max_epoch-last_epoch`, a fixed learning rate will be used
    2. The exp warmup scheme is different from the one used in LrUpdaterHook in MMCV
Args:
num_last_epochs (int): The number of epochs with a fixed learning rate
before the end of the training.
"""
def __init__(self, num_last_epochs, **kwargs):
self.num_last_epochs = num_last_epochs
super(YOLOXLrUpdaterHook, self).__init__(**kwargs)
def get_warmup_lr(self, cur_iters):
def _get_warmup_lr(cur_iters, regular_lr):
# exp warmup scheme
k = self.warmup_ratio * pow(
(cur_iters + 1) / float(self.warmup_iters), 2)
warmup_lr = [_lr * k for _lr in regular_lr]
return warmup_lr
if isinstance(self.base_lr, dict):
lr_groups = {}
for key, base_lr in self.base_lr.items():
lr_groups[key] = _get_warmup_lr(cur_iters, base_lr)
return lr_groups
else:
return _get_warmup_lr(cur_iters, self.base_lr)
def get_lr(self, runner, base_lr):
last_iter = len(runner.data_loader) * self.num_last_epochs
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
progress += 1
if self.min_lr_ratio is not None:
target_lr = base_lr * self.min_lr_ratio
else:
target_lr = self.min_lr
if progress >= max_progress - last_iter:
# fixed learning rate
return target_lr
else:
return annealing_cos(
base_lr, target_lr, (progress - self.warmup_iters) /
(max_progress - self.warmup_iters - last_iter))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.bytes.image_bytes import ImageBytes
from docarray.typing.bytes.video_bytes import VideoBytes
__all__ = ['ImageBytes', 'VideoBytes', 'AudioBytes']
|
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.bytes.image_bytes import ImageBytes
from docarray.typing.bytes.video_bytes import VideoBytes
__all__ = ['ImageBytes', 'VideoBytes', 'AudioBytes']
|