input (string, 33-5k chars) | output (string, 32-5k chars)
---|---|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class JinaEmbeddingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to embed")
credentials: JinaCredentialsInput = JinaCredentialsField()
model: str = SchemaField(
description="Jina embedding model to use",
default="jina-embeddings-v2-base-en",
)
class Output(BlockSchema):
embeddings: list = SchemaField(description="List of embeddings")
def __init__(self):
super().__init__(
id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6",
description="Generates embeddings using Jina AI",
categories={BlockCategory.AI},
input_schema=JinaEmbeddingBlock.Input,
output_schema=JinaEmbeddingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.jina.ai/v1/embeddings"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
data = {"input": input_data.texts, "model": input_data.model}
response = requests.post(url, headers=headers, json=data)
embeddings = [e["embedding"] for e in response.json()["data"]]
yield "embeddings", embeddings
|
import requests
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class JinaEmbeddingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to embed")
credentials: JinaCredentialsInput = JinaCredentialsField()
model: str = SchemaField(
description="Jina embedding model to use",
default="jina-embeddings-v2-base-en",
)
class Output(BlockSchema):
embeddings: list = SchemaField(description="List of embeddings")
def __init__(self):
super().__init__(
id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6",
description="Generates embeddings using Jina AI",
categories={BlockCategory.AI},
input_schema=JinaEmbeddingBlock.Input,
output_schema=JinaEmbeddingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.jina.ai/v1/embeddings"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
data = {"input": input_data.texts, "model": input_data.model}
response = requests.post(url, headers=headers, json=data)
embeddings = [e["embedding"] for e in response.json()["data"]]
yield "embeddings", embeddings
|
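For quick reference, here is a minimal standalone sketch of the same embeddings call without the Block plumbing; the JINA_API_KEY environment variable is an assumption made for this example and is not part of the block above.
import os
import requests

def embed_texts(texts: list, model: str = "jina-embeddings-v2-base-en") -> list:
    """Call the Jina embeddings endpoint and return one vector per input text."""
    # Assumes the key is exposed via the (hypothetical) JINA_API_KEY variable.
    response = requests.post(
        "https://api.jina.ai/v1/embeddings",
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {os.environ['JINA_API_KEY']}",
        },
        json={"input": texts, "model": model},
    )
    response.raise_for_status()
    return [item["embedding"] for item in response.json()["data"]]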
from typing import IO, TYPE_CHECKING, Callable, Optional
from docarray.utils._internal.misc import import_library
def _compress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes:
if algorithm == 'lz4':
if TYPE_CHECKING:
from lz4 import frame
else:
lz4 = import_library('lz4', raise_error=True) # noqa: F841
from lz4 import frame
data = frame.compress(data)
elif algorithm == 'bz2':
import bz2
data = bz2.compress(data)
elif algorithm == 'lzma':
import lzma
data = lzma.compress(data)
elif algorithm == 'zlib':
import zlib
data = zlib.compress(data)
elif algorithm == 'gzip':
import gzip
data = gzip.compress(data)
return data
def _decompress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes:
if algorithm == 'lz4':
if TYPE_CHECKING:
from lz4 import frame
else:
lz4 = import_library('lz4', raise_error=True) # noqa: F841
from lz4 import frame
data = frame.decompress(data)
elif algorithm == 'bz2':
import bz2
data = bz2.decompress(data)
elif algorithm == 'lzma':
import lzma
data = lzma.decompress(data)
elif algorithm == 'zlib':
import zlib
data = zlib.decompress(data)
elif algorithm == 'gzip':
import gzip
data = gzip.decompress(data)
return data
def _get_compress_ctx(algorithm: Optional[str] = None) -> Optional[Callable]:
if algorithm == 'lz4':
if TYPE_CHECKING:
from lz4 import frame
else:
lz4 = import_library('lz4', raise_error=True) # noqa: F841
from lz4 import frame
def _fun(x: IO[bytes]):
return frame.LZ4FrameFile(x, 'wb')
compress_ctx = _fun
elif algorithm == 'gzip':
import gzip
def _fun(x: IO[bytes]):
return gzip.GzipFile(fileobj=x, mode='wb')
compress_ctx = _fun
elif algorithm == 'bz2':
import bz2
def _fun(x: IO[bytes]):
return bz2.BZ2File(filename=x, mode='wb')
compress_ctx = _fun
elif algorithm == 'lzma':
import lzma
def _fun(x: IO[bytes]):
return lzma.LZMAFile(filename=x, mode='wb')
compress_ctx = _fun
else:
compress_ctx = None
return compress_ctx
|
from typing import IO, Callable, Optional
def _compress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes:
if algorithm == 'lz4':
import lz4.frame # type: ignore
data = lz4.frame.compress(data)
elif algorithm == 'bz2':
import bz2
data = bz2.compress(data)
elif algorithm == 'lzma':
import lzma
data = lzma.compress(data)
elif algorithm == 'zlib':
import zlib
data = zlib.compress(data)
elif algorithm == 'gzip':
import gzip
data = gzip.compress(data)
return data
def _decompress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes:
if algorithm == 'lz4':
import lz4.frame # type: ignore
data = lz4.frame.decompress(data)
elif algorithm == 'bz2':
import bz2
data = bz2.decompress(data)
elif algorithm == 'lzma':
import lzma
data = lzma.decompress(data)
elif algorithm == 'zlib':
import zlib
data = zlib.decompress(data)
elif algorithm == 'gzip':
import gzip
data = gzip.decompress(data)
return data
def _get_compress_ctx(algorithm: Optional[str] = None) -> Optional[Callable]:
if algorithm == 'lz4':
import lz4.frame # type: ignore
def _fun(x: IO[bytes]):
return lz4.frame.LZ4FrameFile(x, 'wb')
compress_ctx = _fun
elif algorithm == 'gzip':
import gzip
def _fun(x: IO[bytes]):
return gzip.GzipFile(fileobj=x, mode='wb')
compress_ctx = _fun
elif algorithm == 'bz2':
import bz2
def _fun(x: IO[bytes]):
return bz2.BZ2File(filename=x, mode='wb')
compress_ctx = _fun
elif algorithm == 'lzma':
import lzma
def _fun(x: IO[bytes]):
return lzma.LZMAFile(filename=x, mode='wb')
compress_ctx = _fun
else:
compress_ctx = None
return compress_ctx
|
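As a sanity check on the helpers above, a short round-trip sketch using only the standard-library codecs (the lz4 branch is left out because it needs the optional dependency):
import bz2
import gzip
import lzma
import zlib

def _roundtrip(data: bytes, algorithm: str) -> bytes:
    # Mirror the dispatch above: pick the codec by name, compress, then decompress.
    codec = {'bz2': bz2, 'gzip': gzip, 'lzma': lzma, 'zlib': zlib}[algorithm]
    return codec.decompress(codec.compress(data))

payload = b'hello docarray ' * 100
for name in ('bz2', 'gzip', 'lzma', 'zlib'):
    assert _roundtrip(payload, name) == payload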
import copy
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
config = copy.deepcopy(config)
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None or a string or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
|
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None or a string or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
|
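The register_adapter/register_converter pair above is what lets whole Document objects live in a single column; here is a self-contained sketch of that sqlite3 pattern with a toy Payload class standing in for Document:
import sqlite3

class Payload:
    def __init__(self, text: str):
        self.text = text

# Adapter: Python object -> bytes stored in the column.
sqlite3.register_adapter(Payload, lambda p: p.text.encode('utf8'))
# Converter: bytes read back -> Python object, keyed on the declared column type.
sqlite3.register_converter('Payload', lambda b: Payload(b.decode('utf8')))

conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
conn.execute('CREATE TABLE items (doc Payload)')
conn.execute('INSERT INTO items VALUES (?)', (Payload('hello'),))
row = conn.execute('SELECT doc FROM items').fetchone()
assert isinstance(row[0], Payload) and row[0].text == 'hello'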
import contextlib
from collections.abc import Iterable
from pathlib import Path
from typing import Any
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
def add_dependencies_to_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[tuple[str, Path]]
) -> None:
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
# tomlkit types aren't amazing - treat as Dict instead
pyproject: dict[str, Any] = load(f)
pyproject["tool"]["poetry"]["dependencies"].update(
{
name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
for name, loc in local_editable_dependencies
}
)
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
def remove_dependencies_from_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[str]
) -> None:
"""Remove dependencies from pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
pyproject: dict[str, Any] = load(f)
# tomlkit types aren't amazing - treat as Dict instead
dependencies = pyproject["tool"]["poetry"]["dependencies"]
for name in local_editable_dependencies:
with contextlib.suppress(KeyError):
del dependencies[name]
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
|
from collections.abc import Iterable
from pathlib import Path
from typing import Any
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
def add_dependencies_to_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[tuple[str, Path]]
) -> None:
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
# tomlkit types aren't amazing - treat as Dict instead
pyproject: dict[str, Any] = load(f)
pyproject["tool"]["poetry"]["dependencies"].update(
{
name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
for name, loc in local_editable_dependencies
}
)
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
def remove_dependencies_from_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[str]
) -> None:
"""Remove dependencies from pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
pyproject: dict[str, Any] = load(f)
# tomlkit types aren't amazing - treat as Dict instead
dependencies = pyproject["tool"]["poetry"]["dependencies"]
for name in local_editable_dependencies:
try:
del dependencies[name]
except KeyError:
pass
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
|
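A quick sketch of the tomlkit pattern these helpers rely on, run against an in-memory TOML document instead of a real pyproject.toml (the package name and path are made up for illustration):
from pathlib import Path
from tomlkit import dumps, inline_table, parse

pyproject = parse('[tool.poetry.dependencies]\npython = "^3.9"\n')
dep = inline_table()
dep.update({"path": str(Path("../my-local-pkg")), "develop": True})
pyproject["tool"]["poetry"]["dependencies"]["my-local-pkg"] = dep
print(dumps(pyproject))
# [tool.poetry.dependencies]
# python = "^3.9"
# my-local-pkg = {path = "../my-local-pkg", develop = true}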
import sys
from jina.parsers import set_gateway_parser
from jina.parsers.helper import _update_gateway_args
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
def run(*args, **kwargs):
runtime_args = set_gateway_parser().parse_args(args)
_update_gateway_args(runtime_args)
with AsyncNewLoopRuntime(
runtime_args, req_handler_cls=GatewayRequestHandler
) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
import sys
from jina.parsers import set_gateway_parser
from jina.parsers.helper import _update_gateway_args
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
def run(*args, **kwargs):
runtime_args = set_gateway_parser().parse_args(args)
_update_gateway_args(runtime_args)
with AsyncNewLoopRuntime(runtime_args, req_handler_cls=GatewayRequestHandler) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
_base_ = 'faster-rcnn_r50_fpg_crop640-50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128)))
|
_base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128)))
|
from unittest import TestCase
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.structures.mask import BitmapMasks, PolygonMasks
class TestMaskStructures(TestCase):
def test_bitmap_translate_same_size(self):
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 10, 10), dtype=np.uint8)
mask_target[:, 0:5, 5:10] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((10, 10), 5)
assert mask.masks.shape == (5, 10, 10)
assert_allclose(mask_target, mask.masks)
def test_bitmap_translate_diff_size(self):
# test out shape larger
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 20, 20), dtype=np.uint8)
mask_target[:, 0:5, 5:10] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((20, 20), 5)
assert mask.masks.shape == (5, 20, 20)
assert_allclose(mask_target, mask.masks)
# test out shape smaller
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 20, 8), dtype=np.uint8)
mask_target[:, 0:5, 5:] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((20, 8), 5)
assert mask.masks.shape == (5, 20, 8)
assert_allclose(mask_target, mask.masks)
def test_bitmap_cat(self):
# test invalid inputs
with self.assertRaises(AssertionError):
BitmapMasks.cat(BitmapMasks.random(4))
with self.assertRaises(ValueError):
BitmapMasks.cat([])
with self.assertRaises(AssertionError):
BitmapMasks.cat([BitmapMasks.random(2), PolygonMasks.random(3)])
masks = [BitmapMasks.random(num_masks=3) for _ in range(5)]
cat_mask = BitmapMasks.cat(masks)
assert len(cat_mask) == 3 * 5
for i, m in enumerate(masks):
assert_allclose(m.masks, cat_mask.masks[i * 3:(i + 1) * 3])
def test_polygon_cat(self):
# test invalid inputs
with self.assertRaises(AssertionError):
PolygonMasks.cat(PolygonMasks.random(4))
with self.assertRaises(ValueError):
PolygonMasks.cat([])
with self.assertRaises(AssertionError):
PolygonMasks.cat([BitmapMasks.random(2), PolygonMasks.random(3)])
masks = [PolygonMasks.random(num_masks=3) for _ in range(5)]
cat_mask = PolygonMasks.cat(masks)
assert len(cat_mask) == 3 * 5
for i, m in enumerate(masks):
assert_allclose(m.masks, cat_mask.masks[i * 3:(i + 1) * 3])
|
from unittest import TestCase
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.structures.mask import BitmapMasks
class TestMaskStructures(TestCase):
def test_bitmap_translate_same_size(self):
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 10, 10), dtype=np.uint8)
mask_target[:, 0:5, 5:10] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((10, 10), 5)
assert mask.masks.shape == (5, 10, 10)
assert_allclose(mask_target, mask.masks)
def test_bitmap_translate_diff_size(self):
# test out shape larger
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 20, 20), dtype=np.uint8)
mask_target[:, 0:5, 5:10] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((20, 20), 5)
assert mask.masks.shape == (5, 20, 20)
assert_allclose(mask_target, mask.masks)
# test out shape smaller
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 20, 8), dtype=np.uint8)
mask_target[:, 0:5, 5:] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((20, 8), 5)
assert mask.masks.shape == (5, 20, 8)
assert_allclose(mask_target, mask.masks)
|
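The translate tests above all encode the same expectation: each mask is pasted into a new canvas shifted right by the offset, with anything that falls outside the canvas clipped. A plain-NumPy sketch of that expectation (not the BitmapMasks implementation itself):
import numpy as np

def translate_right(masks: np.ndarray, out_shape: tuple, offset: int) -> np.ndarray:
    # masks has shape (N, H, W); paste into an out_shape canvas, shifted right by offset.
    n, h, w = masks.shape
    out_h, out_w = out_shape
    out = np.zeros((n, out_h, out_w), dtype=masks.dtype)
    copy_h = min(h, out_h)
    copy_w = min(w, out_w - offset)
    if copy_w > 0:
        out[:, :copy_h, offset:offset + copy_w] = masks[:, :copy_h, :copy_w]
    return out

masks = np.zeros((5, 10, 10), dtype=np.uint8)
masks[:, 0:5, 0:5] = 1
# Matches the "out shape smaller" case: a (20, 8) canvas keeps only 3 of the 5 shifted columns.
assert translate_right(masks, (20, 8), 5)[:, 0:5, 5:].all()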
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestRPN(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = MODELS.build(model)
self.assertEqual(detector.bbox_head.num_classes, 1)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test forward train
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
self.assertIsInstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestRPN(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
self.assertEqual(detector.bbox_head.num_classes, 1)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test forward train
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
self.assertIsInstance(batch_results, tuple)
|
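For context, @parameterized.expand generates one concrete test method per listed tuple, which is how the same RPN test body runs for every (config, devices) combination; a minimal sketch of that mechanism outside mmdet:
from unittest import TestCase
from parameterized import parameterized

class TestExpandPattern(TestCase):
    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_cfg_and_devices(self, cfg_file, devices):
        # Each tuple is unpacked into the positional arguments of a generated test.
        self.assertTrue(cfg_file.endswith('.py'))
        self.assertTrue(all(device in ('cpu', 'cuda') for device in devices))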
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sklearn.cluster import AgglomerativeClustering
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize the embeddings, in which case you should normalize the embeddings:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize the embeddings, in which case you should normalize the embeddings:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
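If the chosen model does not return normalized vectors, the normalization hinted at in the commented-out line looks like this (a sketch with a random matrix standing in for embedder.encode(corpus)):
import numpy as np

rng = np.random.default_rng(0)
corpus_embeddings = rng.normal(size=(11, 384))  # stand-in for real sentence embeddings
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Every row now has unit length, so Euclidean distances are a monotone function of cosine distances.
assert np.allclose(np.linalg.norm(corpus_embeddings, axis=1), 1.0)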
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from executor.torch_encoder import ImageTorchEncoder
from pytest_mock import MockerFixture
from torch import hub
def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None:
os.environ['TORCH_HOME'] = str(tmpdir)
spy = mocker.spy(hub, 'urlopen')
_ = ImageTorchEncoder(model_name='mobilenet_v2')
assert os.path.isfile(
os.path.join(tmpdir, 'hub', 'checkpoints', 'mobilenet_v2-b0353104.pth')
)
assert spy.call_count == 1
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pytest_mock import MockerFixture
from torch import hub
from ...torch_encoder import ImageTorchEncoder
def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None:
os.environ['TORCH_HOME'] = str(tmpdir)
spy = mocker.spy(hub, 'urlopen')
_ = ImageTorchEncoder(model_name='mobilenet_v2')
assert os.path.isfile(
os.path.join(tmpdir, 'hub', 'checkpoints', 'mobilenet_v2-b0353104.pth')
)
assert spy.call_count == 1
|
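mocker.spy wraps a real attribute so the original call still runs while its invocations are recorded, which is what the torch.hub test above counts on; a minimal pytest-mock sketch of the same idea against the standard library:
import math
from pytest_mock import MockerFixture

def test_spy_counts_calls(mocker: MockerFixture) -> None:
    spy = mocker.spy(math, 'sqrt')
    assert math.sqrt(9) == 3.0   # the real function still executes
    assert spy.call_count == 1   # ...and the call was recorded by the spy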
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.10.0',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
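The version-detection block at the top of both setup.py variants works by exec-ing the single __version__ line out of the package's __init__.py instead of importing the package; a tiny isolated sketch of that pattern (the version string is made up):
libinfo_content = ['"""package docstring"""\n', "__version__ = '1.2.3'\n", 'import os\n']
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][0]
namespace = {}
exec(version_line, namespace)  # defines __version__ without importing the package
assert namespace['__version__'] == '1.2.3'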
import pytest
import random
import string
import logging
from llama_index.core.schema import (
TextNode,
RelatedNodeInfo,
NodeRelationship,
)
from llama_index.vector_stores.lindorm import (
LindormVectorStore,
LindormVectorClient,
)
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
MetadataFilters,
MetadataFilter,
FilterOperator,
FilterCondition,
)
logger = logging.getLogger(__name__)
def _get_lindorm_vector_store():
# Lindorm instance info, please replace with your own
host = "<ld-bp******jm*******-proxy-search-pub.lindorm.aliyuncs.com>"
port = 30070
username = "<your username>"
password = "<your password>"
index_name = "<lindorm_pytest_index>"
nprobe = "2"
reorder_factor = "10"
# Check if placeholder values exist, skip if they do
if "<" in host or "<" in username or "<" in password or "<" in index_name:
return None
# Create a client and vector store instance
client = LindormVectorClient(
host=host,
port=port,
username=username,
password=password,
index=index_name,
dimension=5,
nprobe=nprobe,
reorder_factor=reorder_factor,
)
return LindormVectorStore(client)
@pytest.fixture(scope="module")
def vector_store():
store = _get_lindorm_vector_store()
if not store:
pytest.skip("No Lindorm config, skipping test case!")
return store
@pytest.fixture(scope="session")
def nodes():
nodes = []
for i in range(1000):
vector = [random.random() for _ in range(5)]
characters = string.ascii_letters + string.digits
random_string = "".join(random.choices(characters, k=5))
new_node = TextNode(
embedding=vector,
text=random_string + " " + str(i),
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-" + str(i))
},
metadata={
"author": "test " + random_string,
"mark_id": i,
},
)
nodes.append(new_node)
return nodes
def test_add_nodes(vector_store, nodes):
added_ids = vector_store.add(nodes)
assert len(added_ids) == len(nodes)
assert all(id_ for id_ in added_ids)
def test_simple_query(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
simple_query = VectorStoreQuery(query_embedding=query_embedding, similarity_top_k=5)
result = vector_store.query(simple_query)
assert len(result.nodes) > 0
def test_query_with_metadata_filter(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
filter1 = MetadataFilter(key="mark_id", value=0, operator=FilterOperator.GTE)
filter2 = MetadataFilter(key="mark_id", value=500, operator=FilterOperator.LTE)
filters = MetadataFilters(filters=[filter1, filter2], condition=FilterCondition.AND)
filter_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=5, filters=filters
)
result = vector_store.query(filter_query)
assert len(result.nodes) > 0
def test_lexical_query(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
lexical_query = VectorStoreQuery(
mode=VectorStoreQueryMode.TEXT_SEARCH,
query_embedding=query_embedding,
similarity_top_k=5,
# your query str match the field "content"(text you stored in),
# and note the minimum search granularity of query str is one token.
query_str="your query str",
)
result = vector_store.query(lexical_query)
assert len(result.nodes) > 0
def test_hybrid_query(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
hybrid_query = VectorStoreQuery(
mode=VectorStoreQueryMode.HYBRID,
query_embedding=query_embedding,
similarity_top_k=5,
# your query str match the field "content"(text you stored in),
# and note the minimum search granularity of query str is one token.
query_str="your query str",
)
result = vector_store.query(hybrid_query)
assert len(result.nodes) > 0
def test_delete_node(vector_store):
vector_store.delete(ref_doc_id="test-0")
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
filter_query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=5,
filters=MetadataFilters(
filters=[
MetadataFilter(
key="relationships.SOURCE.node_id",
value="test-0",
operator=FilterOperator.EQ,
)
],
condition=FilterCondition.AND,
),
)
result = vector_store.query(filter_query)
assert len(result.nodes) == 0
|
import pytest
import random
import string
import logging
from llama_index.core.schema import (
TextNode,
RelatedNodeInfo,
NodeRelationship,
)
from llama_index.vector_stores.lindorm import (
LindormVectorStore,
LindormVectorClient,
)
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
MetadataFilters,
MetadataFilter,
FilterOperator,
FilterCondition,
)
logger = logging.getLogger(__name__)
def _get_lindorm_vector_store():
# Lindorm instance info, please replace with your own
host = "<ld-bp******jm*******-proxy-search-pub.lindorm.aliyuncs.com>"
port = 30070
username = "<your username>"
password = "<your password>"
index_name = "<lindorm_pytest_index>"
nprobe = "2"
reorder_factor = "10"
# Check if placeholder values exist, skip if they do
if "<" in host or "<" in username or "<" in password or "<" in index_name:
return None
# Create a client and vector store instance
client = LindormVectorClient(
host=host,
port=port,
username=username,
password=password,
index=index_name,
dimension=5,
nprobe=nprobe,
reorder_factor=reorder_factor,
)
return LindormVectorStore(client)
@pytest.fixture(scope="module")
def vector_store():
store = _get_lindorm_vector_store()
if not store:
pytest.skip("No Lindorm config, skipping test case!")
return store
@pytest.fixture(scope="session")
def nodes():
nodes = []
for i in range(1000):
vector = [random.random() for _ in range(5)]
characters = string.ascii_letters + string.digits
random_string = "".join(random.choices(characters, k=5))
new_node = TextNode(
embedding=vector,
text=random_string + " " + str(i),
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-" + str(i))
},
metadata={
"author": "test " + random_string,
"mark_id": i,
},
)
nodes.append(new_node)
return nodes
def test_add_nodes(vector_store, nodes):
added_ids = vector_store.add(nodes)
assert len(added_ids) == len(nodes)
assert all(id_ for id_ in added_ids)
def test_simple_query(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
simple_query = VectorStoreQuery(query_embedding=query_embedding, similarity_top_k=5)
result = vector_store.query(simple_query)
assert len(result.nodes) > 0
def test_query_with_metadata_filter(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
filter1 = MetadataFilter(key="mark_id", value=0, operator=FilterOperator.GTE)
filter2 = MetadataFilter(key="mark_id", value=500, operator=FilterOperator.LTE)
filters = MetadataFilters(filters=[filter1, filter2], condition=FilterCondition.AND)
filter_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=5, filters=filters
)
result = vector_store.query(filter_query)
assert len(result.nodes) > 0
def test_lexical_query(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
lexical_query = VectorStoreQuery(
mode=VectorStoreQueryMode.TEXT_SEARCH,
query_embedding=query_embedding,
similarity_top_k=5,
# your query str match the field "content"(text you stored in),
# and note the minimum search granularity of query str is one token.
query_str="your query str",
)
result = vector_store.query(lexical_query)
assert len(result.nodes) > 0
def test_hybrid_query(vector_store):
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
hybrid_query = VectorStoreQuery(
mode=VectorStoreQueryMode.HYBRID,
query_embedding=query_embedding,
similarity_top_k=5,
# your query str match the field "content"(text you stored in),
# and note the minimum search granularity of query str is one token.
query_str="your query str",
)
result = vector_store.query(hybrid_query)
assert len(result.nodes) > 0
def test_delete_node(vector_store):
vector_store.delete(ref_doc_id="test-0")
query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0]
filter_query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=5,
filters=MetadataFilters(
filters=[
MetadataFilter(
key="relationships.SOURCE.node_id",
value="test-0",
operator=FilterOperator.EQ,
)
],
condition=FilterCondition.AND,
),
)
result = vector_store.query(filter_query)
assert len(result.nodes) == 0
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.langsmith import (
LangSmithDatasetChatLoader,
LangSmithRunChatLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LangSmithRunChatLoader": "langchain_community.chat_loaders.langsmith",
"LangSmithDatasetChatLoader": "langchain_community.chat_loaders.langsmith",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LangSmithDatasetChatLoader",
"LangSmithRunChatLoader",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.langsmith import (
LangSmithDatasetChatLoader,
LangSmithRunChatLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LangSmithRunChatLoader": "langchain_community.chat_loaders.langsmith",
"LangSmithDatasetChatLoader": "langchain_community.chat_loaders.langsmith",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LangSmithRunChatLoader",
"LangSmithDatasetChatLoader",
]
|
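Both variants lean on a module-level __getattr__ (PEP 562) so that deprecated names keep importing while resolving to their new location; a generic sketch of that mechanism without langchain's create_importer helper:
import importlib
import warnings
from typing import Any

DEPRECATED_LOOKUP = {
    "LangSmithRunChatLoader": "langchain_community.chat_loaders.langsmith",
    "LangSmithDatasetChatLoader": "langchain_community.chat_loaders.langsmith",
}

def __getattr__(name: str) -> Any:
    """Resolve deprecated names lazily and warn about the new import path."""
    if name in DEPRECATED_LOOKUP:
        new_module = DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")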
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
@MODELS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_convs (int, optional): number of convolutional layer in GlbCtxHead.
Default: 4.
in_channels (int, optional): number of input channels. Default: 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Default: 256.
num_classes (int, optional): number of classes. Default: 80.
loss_weight (float, optional): global context loss weight. Default: 1.
conv_cfg (dict, optional): config to init conv layer. Default: None.
norm_cfg (dict, optional): config to init norm layer. Default: None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=1.0,
conv_cfg=None,
norm_cfg=None,
conv_to_res=False,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc'))):
super(GlobalContextHead, self).__init__(init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
@auto_fp16()
def forward(self, feats):
"""Forward function."""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
@force_fp32(apply_to=('pred', ))
def loss(self, pred, labels):
"""Loss function."""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
@HEADS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_convs (int, optional): number of convolutional layer in GlbCtxHead.
Default: 4.
in_channels (int, optional): number of input channels. Default: 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Default: 256.
num_classes (int, optional): number of classes. Default: 80.
loss_weight (float, optional): global context loss weight. Default: 1.
conv_cfg (dict, optional): config to init conv layer. Default: None.
norm_cfg (dict, optional): config to init norm layer. Default: None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=1.0,
conv_cfg=None,
norm_cfg=None,
conv_to_res=False,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc'))):
super(GlobalContextHead, self).__init__(init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
@auto_fp16()
def forward(self, feats):
"""Forward function."""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
@force_fp32(apply_to=('pred', ))
def loss(self, pred, labels):
"""Loss function."""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
|
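The loss method shared by both versions turns per-image label lists into multi-hot targets for BCEWithLogitsLoss; a self-contained sketch of just that target construction with illustrative shapes:
import torch
import torch.nn as nn

num_classes = 80
pred = torch.randn(2, num_classes)                      # stand-in for the head's mc_pred
labels = [torch.tensor([3, 3, 7]), torch.tensor([0])]   # per-image GT class indices

labels = [lbl.unique() for lbl in labels]               # duplicate labels collapse to one positive
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
    targets[i, label] = 1.0                             # multi-hot target per image
loss = nn.BCEWithLogitsLoss()(pred, targets)
assert targets[0, 3] == 1.0 and targets[0, 7] == 1.0 and targets[1, 0] == 1.0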
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale)
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        trident_img_metas = [img_meta * num_branch for img_meta in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
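To make the multi-branch handling above concrete, here is a tiny sketch of the ``batch_data_samples * num_branch`` duplication: list multiplication repeats the whole batch once per branch, so every branch receives the same annotations. Plain strings stand in for real data samples.
num_branch = 3
batch_data_samples = ["sample_0", "sample_1"]  # stand-ins for detection data samples
trident_data_samples = batch_data_samples * num_branch
# -> ['sample_0', 'sample_1', 'sample_0', 'sample_1', 'sample_0', 'sample_1']
assert len(trident_data_samples) == num_branch * len(batch_data_samples)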
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.core import SampleList
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale)
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        trident_img_metas = [img_meta * num_branch for img_meta in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
|
from ._vggish_pipeline import VGGISH as _VGGISH, VGGishBundle
from torchaudio._internal.module_utils import dropping_const_support
VGGISH = dropping_const_support(_VGGISH, "VGGISH")
__all__ = ["VGGISH", "VGGishBundle"]
|
from ._vggish_pipeline import VGGISH, VGGishBundle
__all__ = ["VGGISH", "VGGishBundle"]
|
import os
import torchaudio
import torchvision
from torch.utils.data import Dataset
def _load_list(args, *filenames):
output = []
length = []
for filename in filenames:
filepath = os.path.join(args.root_dir, "labels", filename)
for line in open(filepath).read().splitlines():
            dataset, rel_path, input_length = line.split(",")[:3]
path = os.path.normpath(os.path.join(args.root_dir, dataset, rel_path[:-4] + ".mp4"))
length.append(int(input_length))
output.append(path)
return output, length
def load_video(path):
"""
rtype: torch, T x C x H x W
"""
vid = torchvision.io.read_video(path, pts_unit="sec", output_format="THWC")[0]
vid = vid.permute((0, 3, 1, 2))
return vid
def load_audio(path):
"""
rtype: torch, T x 1
"""
waveform, sample_rate = torchaudio.load(path, normalize=True)
return waveform.transpose(1, 0)
def load_transcript(path):
transcript_path = path.replace("video_seg", "text_seg")[:-4] + ".txt"
return open(transcript_path).read().splitlines()[0]
def load_item(path, md):
if md == "v":
return (load_video(path), load_transcript(path))
if md == "a":
return (load_audio(path), load_transcript(path))
if md == "av":
return (load_audio(path), load_video(path), load_transcript(path))
class LRS3(Dataset):
def __init__(
self,
args,
subset: str = "train",
) -> None:
if subset is not None and subset not in ["train", "val", "test"]:
raise ValueError("When `subset` is not None, it must be one of ['train', 'val', 'test'].")
self.args = args
if subset == "train":
self._filelist, self._lengthlist = _load_list(self.args, "lrs3_train_transcript_lengths_seg16s.csv")
if subset == "val":
self._filelist, self._lengthlist = _load_list(self.args, "lrs3_test_transcript_lengths_seg16s.csv")
if subset == "test":
self._filelist, self._lengthlist = _load_list(self.args, "lrs3_test_transcript_lengths_seg16s.csv")
def __getitem__(self, n):
path = self._filelist[n]
return load_item(path, self.args.md)
def __len__(self) -> int:
return len(self._filelist)
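A hedged usage sketch for the ``LRS3`` wrapper above; the root directory and modality flag are hypothetical and assume the label CSVs and the ``.mp4``/``.txt`` segments are laid out exactly as the loader expects.
from types import SimpleNamespace
args = SimpleNamespace(root_dir="/data/lrs3", md="av")  # hypothetical layout and modality
dataset = LRS3(args, subset="train")
print(len(dataset))
audio, video, transcript = dataset[0]  # md="av" yields (audio, video, transcript)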
|
import os
import torchaudio
import torchvision
from torch.utils.data import Dataset
def _load_list(args, *filenames):
output = []
length = []
for filename in filenames:
filepath = os.path.join(os.path.dirname(args.dataset_path), filename)
for line in open(filepath).read().splitlines():
rel_path, input_length = line.split(",")[1:3]
path = os.path.normpath(os.path.join(args.dataset_path, rel_path[:-4] + ".mp4"))
length.append(int(input_length))
output.append(path)
return output, length
def load_video(path):
"""
rtype: torch, T x C x H x W
"""
vid = torchvision.io.read_video(path, pts_unit="sec", output_format="THWC")[0]
vid = vid.permute((0, 3, 1, 2))
return vid
def load_audio(path):
"""
rtype: torch, T x 1
"""
waveform, sample_rate = torchaudio.load(path, normalize=True)
return waveform.transpose(1, 0)
def load_transcript(path):
transcript_path = path.replace("video_seg", "text_seg")[:-4] + ".txt"
return open(transcript_path).read().splitlines()[0]
def load_item(path, md):
if md == "v":
return (load_video(path), load_transcript(path))
if md == "a":
return (load_audio(path), load_transcript(path))
if md == "av":
return (load_audio(path), load_video(path), load_transcript(path))
class LRS3(Dataset):
def __init__(
self,
args,
subset: str = "train",
) -> None:
if subset is not None and subset not in ["train", "val", "test"]:
raise ValueError("When `subset` is not None, it must be one of ['train', 'val', 'test'].")
self.args = args
if subset == "train":
self._filelist, self._lengthlist = _load_list(self.args, "train_transcript_lengths_seg16s.csv")
if subset == "val":
self._filelist, self._lengthlist = _load_list(self.args, "test_transcript_lengths_seg16s.csv")
if subset == "test":
self._filelist, self._lengthlist = _load_list(self.args, "test_transcript_lengths_seg16s.csv")
def __getitem__(self, n):
path = self._filelist[n]
return load_item(path, self.args.md)
def __len__(self) -> int:
return len(self._filelist)
|
import math
from typing import List, Optional
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.prompts import PromptTemplate
# taken from the paper
DEFAULT_REFLECTION_PROMPT_STR = """\
Given a query and a conversation trajectory, evaluate two things regarding whether the conversation answers the question:
- **correctness**: Whether the thoughts and actions so far are correctly answering the query, even if the answer is not found yet. Rate from 1-10, where 1 is incorrect and 10 is correct.
- **completeness**: Whether the answer is found yet.
Provide your reasoning and analysis in detail.
Focus on the latest thought, action, and observation.
Incomplete trajectories can be correct if the thoughts and actions so far are correct, \
even if the answer is not found yet.
Do not generate additional thoughts or actions.
Query: {query}
Conversation History:
{conversation_history}
"""
DEFAULT_REFLECTION_PROMPT = PromptTemplate(DEFAULT_REFLECTION_PROMPT_STR)
DEFAULT_CANDIDATES_PROMPT_STR = """\
Given a query and a conversation trajectory, provide a list of {num_candidates} candidates for the next reasoning step.
Focus on the latest thought, action, and observation.
Do not generate additional thoughts or actions.
Query: {query}
Conversation History:
{conversation_history}
"""
DEFAULT_CANDIDATES_PROMPT = PromptTemplate(DEFAULT_CANDIDATES_PROMPT_STR)
class Candidates(BaseModel):
"""Candidates for the next reasoning step."""
candidates: List[str]
class Evaluation(BaseModel):
"""Evaluation of a given node."""
score: int = Field(
description="Score of the reflection indicating **correctness**. Integer from 1-10",
        le=10,
        ge=1,
)
is_done: bool = Field(
False, description="Whether the answer is found yet (**completeness**)."
)
reasoning: str = Field(
default="", description="Reasoning and justification for the evaluation."
)
class SearchNode(BaseModel):
"""
Search node.
Named differently from `Node` which is a core module in LlamaIndex.
"""
current_reasoning: List[BaseReasoningStep] = Field(
..., description="Current reasoning."
)
parent: Optional["SearchNode"] = Field(default=None, description="Parent node.")
children: List["SearchNode"] = Field(
default_factory=list, description="Children nodes."
)
evaluation: Evaluation = Field(..., description="Evaluation of the node.")
visits: int = Field(default=0, description="Number of visits to the node.")
@property
def answer(self) -> Optional[str]:
"""Answer."""
if not self.current_reasoning:
return None
if isinstance(self.current_reasoning[-1], ResponseReasoningStep):
return self.current_reasoning[-1].response
else:
return self.current_reasoning[-1].get_content()
@property
def is_done(self) -> bool:
"""Is the node done."""
return self.evaluation.is_done
@property
def score(self) -> float:
"""Score of the node."""
return self.evaluation.score
@property
def upper_confidence_bound(self) -> float:
"""Upper confidence bound."""
return self.score + 1.0 * math.sqrt(math.log(self.parent.visits) / self.visits)
def backpropagate(self, reward: float) -> None:
"""Backpropagate the reward."""
cur_node = self
while cur_node is not None:
cur_node.visits += 1
cur_node.evaluation.score = (
reward + (cur_node.visits - 1) * cur_node.score
) / cur_node.visits
cur_node = cur_node.parent
def get_best_leaf(self) -> "SearchNode":
"""
Get best leaf node.
        Recursively selects the best leaf across all descendant nodes.
"""
# only get children that aren't done yet
free_children = [c for c in self.children if not c.is_done]
if not free_children:
return self
best_child = max(free_children, key=lambda x: x.upper_confidence_bound)
return best_child.get_best_leaf()
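A small sketch exercising the search tree above: build a root with one child, backpropagate a reward, and pick the best leaf via the UCB score. Reasoning steps and scores are toy values.
root = SearchNode(
    current_reasoning=[ResponseReasoningStep(thought="start", response="...")],
    evaluation=Evaluation(score=5),
)
child = SearchNode(
    current_reasoning=[ResponseReasoningStep(thought="expand", response="...")],
    evaluation=Evaluation(score=7),
    parent=root,
)
root.children.append(child)
child.backpropagate(reward=8.0)  # bumps visit counts and averages scores up to the root
assert root.get_best_leaf() is child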
|
import math
from typing import List, Optional
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.prompts import PromptTemplate
# taken from the paper
DEFAULT_REFLECTION_PROMPT_STR = """\
Given a query and a conversation trajectory, evaluate two things regarding whether the conversation answers the question:
- **correctness**: Whether the thoughts and actions so far are correctly answering the query, even if the answer is not found yet. Rate from 1-10, where 1 is incorrect and 10 is correct.
- **completeness**: Whether the answer is found yet.
Provide your reasoning and analysis in detail.
Focus on the latest thought, action, and observation.
Incomplete trajectories can be correct if the thoughts and actions so far are correct, \
even if the answer is not found yet.
Do not generate additional thoughts or actions.
Query: {query}
Conversation History:
{conversation_history}
"""
DEFAULT_REFLECTION_PROMPT = PromptTemplate(DEFAULT_REFLECTION_PROMPT_STR)
DEFAULT_CANDIDATES_PROMPT_STR = """\
Given a query and a conversation trajectory, provide a list of {num_candidates} candidates for the next reasoning step.
Focus on the latest thought, action, and observation.
Do not generate additional thoughts or actions.
Query: {query}
Conversation History:
{conversation_history}
"""
DEFAULT_CANDIDATES_PROMPT = PromptTemplate(DEFAULT_CANDIDATES_PROMPT_STR)
class Candidates(BaseModel):
"""Candidates for the next reasoning step."""
candidates: List[str]
class Evaluation(BaseModel):
"""Evaluation of a given node."""
score: int = Field(
description="Score of the reflection indicating **correctness**. Integer from 1-10",
        le=10,
        ge=1,
)
is_done: bool = Field(
False, description="Whether the answer is found yet (**completeness**)."
)
reasoning: str = Field(
default="", description="Reasoning and justification for the evaluation."
)
class SearchNode(BaseModel):
"""Search node.
Named differently from `Node` which is a core module in LlamaIndex.
"""
current_reasoning: List[BaseReasoningStep] = Field(
..., description="Current reasoning."
)
parent: Optional["SearchNode"] = Field(default=None, description="Parent node.")
children: List["SearchNode"] = Field(
default_factory=list, description="Children nodes."
)
evaluation: Evaluation = Field(..., description="Evaluation of the node.")
visits: int = Field(default=0, description="Number of visits to the node.")
@property
def answer(self) -> Optional[str]:
"""Answer."""
if not self.current_reasoning:
return None
if isinstance(self.current_reasoning[-1], ResponseReasoningStep):
return self.current_reasoning[-1].response
else:
return self.current_reasoning[-1].get_content()
@property
def is_done(self) -> bool:
"""Is the node done."""
return self.evaluation.is_done
@property
def score(self) -> float:
"""Score of the node."""
return self.evaluation.score
@property
def upper_confidence_bound(self) -> float:
"""Upper confidence bound."""
return self.score + 1.0 * math.sqrt(math.log(self.parent.visits) / self.visits)
def backpropagate(self, reward: float) -> None:
"""Backpropagate the reward."""
cur_node = self
while cur_node is not None:
cur_node.visits += 1
cur_node.evaluation.score = (
reward + (cur_node.visits - 1) * cur_node.score
) / cur_node.visits
cur_node = cur_node.parent
def get_best_leaf(self) -> "SearchNode":
"""Get best leaf node.
        Recursively selects the best leaf across all descendant nodes.
"""
# only get children that aren't done yet
free_children = [c for c in self.children if not c.is_done]
if not free_children:
return self
best_child = max(free_children, key=lambda x: x.upper_confidence_bound)
return best_child.get_best_leaf()
|
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from google.protobuf import __version__ as __pb__version__
else:
protobuf = import_library('google.protobuf', raise_error=True)
__pb__version__ = protobuf.__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocArrayStackedProto,
DocumentArrayProto,
DocumentProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocArrayStackedProto,
DocumentArrayProto,
DocumentProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocumentArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocArrayStackedProto',
'DocumentArrayProto',
'ListOfDocArrayProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocArrayStackedProto,
DocumentArrayProto,
DocumentProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocArrayStackedProto,
DocumentArrayProto,
DocumentProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocumentArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocArrayStackedProto',
'DocumentArrayProto',
'ListOfDocArrayProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
from __future__ import annotations
from typing import Any
from langchain_text_splitters.base import TextSplitter
class NLTKTextSplitter(TextSplitter):
"""Splitting text using NLTK package."""
def __init__(
self,
separator: str = "\n\n",
language: str = "english",
*,
use_span_tokenize: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
self._separator = separator
self._language = language
self._use_span_tokenize = use_span_tokenize
if self._use_span_tokenize and self._separator != "":
raise ValueError("When use_span_tokenize is True, separator should be ''")
try:
import nltk
if self._use_span_tokenize:
self._tokenizer = nltk.tokenize._get_punkt_tokenizer(self._language)
else:
self._tokenizer = nltk.tokenize.sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
def split_text(self, text: str) -> list[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._use_span_tokenize:
spans = list(self._tokenizer.span_tokenize(text))
splits = []
for i, (start, end) in enumerate(spans):
if i > 0:
prev_end = spans[i - 1][1]
sentence = text[prev_end:start] + text[start:end]
else:
sentence = text[start:end]
splits.append(sentence)
else:
splits = self._tokenizer(text, language=self._language)
return self._merge_splits(splits, self._separator)
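A hedged usage sketch for the splitter above; it assumes the NLTK 'punkt' sentence-tokenizer data has already been downloaded (for example via ``nltk.download('punkt')``).
splitter = NLTKTextSplitter(chunk_size=100, chunk_overlap=0)
chunks = splitter.split_text(
    "NLTK first splits the text into sentences. The sentences are then merged "
    "back into chunks of at most chunk_size characters. This is a third sentence."
)
print(chunks)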
|
from __future__ import annotations
from typing import Any, List
from langchain_text_splitters.base import TextSplitter
class NLTKTextSplitter(TextSplitter):
"""Splitting text using NLTK package."""
def __init__(
self,
separator: str = "\n\n",
language: str = "english",
*,
use_span_tokenize: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
self._separator = separator
self._language = language
self._use_span_tokenize = use_span_tokenize
if self._use_span_tokenize and self._separator != "":
raise ValueError("When use_span_tokenize is True, separator should be ''")
try:
import nltk
if self._use_span_tokenize:
self._tokenizer = nltk.tokenize._get_punkt_tokenizer(self._language)
else:
self._tokenizer = nltk.tokenize.sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._use_span_tokenize:
spans = list(self._tokenizer.span_tokenize(text))
splits = []
for i, (start, end) in enumerate(spans):
if i > 0:
prev_end = spans[i - 1][1]
sentence = text[prev_end:start] + text[start:end]
else:
sentence = text[start:end]
splits.append(sentence)
else:
splits = self._tokenizer(text, language=self._language)
return self._merge_splits(splits, self._separator)
|
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Image
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.text.text import Text, TextConfig
from ..utils import require_pil
@pytest.fixture
def text_file(tmp_path):
filename = tmp_path / "text.txt"
data = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Second paragraph:
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
)
with open(filename, "w", encoding="utf-8") as f:
f.write(data)
return str(filename)
@pytest.fixture
def text_file_with_image(tmp_path, image_file):
filename = tmp_path / "text_with_image.txt"
with open(filename, "w", encoding="utf-8") as f:
f.write(image_file)
return str(filename)
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = TextConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = TextConfig(name="name", data_files=data_files)
@pytest.mark.parametrize("keep_linebreaks", [True, False])
def test_text_linebreaks(text_file, keep_linebreaks):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read().splitlines(keepends=keep_linebreaks)
text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8")
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
@require_pil
def test_text_cast_image(text_file_with_image):
with open(text_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[0]
text = Text(encoding="utf-8", features=Features({"image": Image()}))
generator = text._generate_tables([[text_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
@pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"])
def test_text_sample_by(sample_by, text_file):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read()
if sample_by == "line":
expected_content = expected_content.splitlines()
elif sample_by == "paragraph":
expected_content = expected_content.split("\n\n")
elif sample_by == "document":
expected_content = [expected_content]
text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100)
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
|
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Image
from datasets.packaged_modules.text.text import Text
from ..utils import require_pil
@pytest.fixture
def text_file(tmp_path):
filename = tmp_path / "text.txt"
data = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Second paragraph:
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
)
with open(filename, "w", encoding="utf-8") as f:
f.write(data)
return str(filename)
@pytest.fixture
def text_file_with_image(tmp_path, image_file):
filename = tmp_path / "text_with_image.txt"
with open(filename, "w", encoding="utf-8") as f:
f.write(image_file)
return str(filename)
@pytest.mark.parametrize("keep_linebreaks", [True, False])
def test_text_linebreaks(text_file, keep_linebreaks):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read().splitlines(keepends=keep_linebreaks)
text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8")
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
@require_pil
def test_text_cast_image(text_file_with_image):
with open(text_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[0]
text = Text(encoding="utf-8", features=Features({"image": Image()}))
generator = text._generate_tables([[text_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
@pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"])
def test_text_sample_by(sample_by, text_file):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read()
if sample_by == "line":
expected_content = expected_content.splitlines()
elif sample_by == "paragraph":
expected_content = expected_content.split("\n\n")
elif sample_by == "document":
expected_content = [expected_content]
text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100)
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
    Useful e.g. for connecting RNNs and convnets.
Args:
dims: Tuple of integers. Permutation pattern does not include the
batch dimension. Indexing starts at 1.
For instance, `(1, 3, 2)` permutes the second and third dimensions
of the input.
Input shape:
Arbitrary.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Permute((2, 1))(x)
>>> y.shape
(None, 64, 10)
"""
def __init__(self, dims, **kwargs):
super().__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError(
"Invalid permutation argument `dims` for Permute Layer. "
"The set of indices in `dims` must be consecutive and start "
f"from 1. Received dims={dims}"
)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
output_shape = [input_shape[0]]
for dim in self.dims:
output_shape.append(input_shape[dim])
return tuple(output_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def call(self, inputs):
return ops.transpose(inputs, axes=(0,) + self.dims)
def get_config(self):
config = {"dims": self.dims}
base_config = super().get_config()
return {**base_config, **config}
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
    Useful e.g. for connecting RNNs and convnets.
Args:
dims: Tuple of integers. Permutation pattern does not include the
batch dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimensions
of the input.
Input shape:
Arbitrary.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Permute((2, 1))(x)
>>> y.shape
(None, 64, 10)
"""
def __init__(self, dims, **kwargs):
super().__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError(
"Invalid permutation argument `dims` for Permute Layer. "
"The set of indices in `dims` must be consecutive and start "
f"from 1. Received dims={dims}"
)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
output_shape = [input_shape[0]]
for dim in self.dims:
output_shape.append(input_shape[dim])
return tuple(output_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def call(self, inputs):
return ops.transpose(inputs, axes=(0,) + self.dims)
def get_config(self):
config = {"dims": self.dims}
base_config = super().get_config()
return {**base_config, **config}
|
from __future__ import annotations
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SparseTripletLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
|
from __future__ import annotations
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SparseTripletLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
|
from __future__ import annotations
from enum import Enum
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch.utils._pytree import tree_flatten
from ._tv_tensor import TVTensor
class BoundingBoxFormat(Enum):
"""Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(TVTensor):
""":class:`torch.Tensor` subclass for bounding boxes.
.. note::
There should be only one :class:`~torchvision.tv_tensors.BoundingBoxes`
instance per sample e.g. ``{"img": img, "bbox": BoundingBoxes(...)}``,
although one :class:`~torchvision.tv_tensors.BoundingBoxes` object can
contain multiple bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: Union[BoundingBoxFormat, str], canvas_size: Tuple[int, int], check_dims: bool = True) -> BoundingBoxes: # type: ignore[override]
if check_dims:
if tensor.ndim == 1:
tensor = tensor.unsqueeze(0)
elif tensor.ndim != 2:
raise ValueError(f"Expected a 1D or 2D tensor, got {tensor.ndim}D")
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def _wrap_output(
cls,
output: torch.Tensor,
args: Sequence[Any] = (),
kwargs: Optional[Mapping[str, Any]] = None,
) -> BoundingBoxes:
# If there are BoundingBoxes instances in the output, their metadata got lost when we called
# super().__torch_function__. We need to restore the metadata somehow, so we choose to take
# the metadata from the first bbox in the parameters.
# This should be what we want in most cases. When it's not, it's probably a mis-use anyway, e.g.
# something like some_xyxy_bbox + some_xywh_bbox; we don't guard against those cases.
flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ())) # type: ignore[operator]
first_bbox_from_args = next(x for x in flat_params if isinstance(x, BoundingBoxes))
format, canvas_size = first_bbox_from_args.format, first_bbox_from_args.canvas_size
if isinstance(output, torch.Tensor) and not isinstance(output, BoundingBoxes):
output = BoundingBoxes._wrap(output, format=format, canvas_size=canvas_size, check_dims=False)
elif isinstance(output, (tuple, list)):
output = type(output)(
BoundingBoxes._wrap(part, format=format, canvas_size=canvas_size, check_dims=False) for part in output
)
return output
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
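A brief construction sketch for the class above: a single 1D box is promoted to 2D by ``_wrap``, and the lowercase format string is normalized to the ``BoundingBoxFormat`` enum.
boxes = BoundingBoxes([10, 10, 20, 20], format="xyxy", canvas_size=(100, 100))
print(boxes.shape)        # torch.Size([1, 4])
print(boxes.format)       # BoundingBoxFormat.XYXY
print(boxes.canvas_size)  # (100, 100)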
|
from __future__ import annotations
from enum import Enum
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch.utils._pytree import tree_flatten
from ._tv_tensor import TVTensor
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(TVTensor):
"""[BETA] :class:`torch.Tensor` subclass for bounding boxes.
.. note::
There should be only one :class:`~torchvision.tv_tensors.BoundingBoxes`
instance per sample e.g. ``{"img": img, "bbox": BoundingBoxes(...)}``,
although one :class:`~torchvision.tv_tensors.BoundingBoxes` object can
contain multiple bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: Union[BoundingBoxFormat, str], canvas_size: Tuple[int, int], check_dims: bool = True) -> BoundingBoxes: # type: ignore[override]
if check_dims:
if tensor.ndim == 1:
tensor = tensor.unsqueeze(0)
elif tensor.ndim != 2:
raise ValueError(f"Expected a 1D or 2D tensor, got {tensor.ndim}D")
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def _wrap_output(
cls,
output: torch.Tensor,
args: Sequence[Any] = (),
kwargs: Optional[Mapping[str, Any]] = None,
) -> BoundingBoxes:
# If there are BoundingBoxes instances in the output, their metadata got lost when we called
# super().__torch_function__. We need to restore the metadata somehow, so we choose to take
# the metadata from the first bbox in the parameters.
# This should be what we want in most cases. When it's not, it's probably a mis-use anyway, e.g.
# something like some_xyxy_bbox + some_xywh_bbox; we don't guard against those cases.
flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ())) # type: ignore[operator]
first_bbox_from_args = next(x for x in flat_params if isinstance(x, BoundingBoxes))
format, canvas_size = first_bbox_from_args.format, first_bbox_from_args.canvas_size
if isinstance(output, torch.Tensor) and not isinstance(output, BoundingBoxes):
output = BoundingBoxes._wrap(output, format=format, canvas_size=canvas_size, check_dims=False)
elif isinstance(output, (tuple, list)):
output = type(output)(
BoundingBoxes._wrap(part, format=format, canvas_size=canvas_size, check_dims=False) for part in output
)
return output
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.remote import _mixin_http_server_parser
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
'This Deployment will not be context managed by the Flow.',
)
gp.add_argument(
'--grpc-metadata',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The metadata to be passed to the gRPC request.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help=(
'The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS
),
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
_mixin_http_server_parser(gp)
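A hedged usage sketch: attach the mixin above to a plain ``argparse.ArgumentParser`` and read back a couple of the Deployment options. This assumes jina's ``add_arg_group`` helper works with a standard parser; the executor name is a placeholder.
parser = argparse.ArgumentParser(description='deployment options')
mixin_base_deployment_parser(parser)
args = parser.parse_args(['--uses-before', 'MyPreprocessor', '--tls'])  # placeholder executor name
print(args.uses_before, args.tls)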
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.remote import _mixin_http_server_parser
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
'This Deployment will not be context managed by the Flow.',
)
gp.add_argument(
'--grpc-metadata',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The metadata to be passed to the gRPC request.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
_mixin_http_server_parser(gp)
|
import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
file: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> 'T':
"""Save :attr:`.tensor` into an wav file. Mono/stereo is preserved.
:param file: if file is a string, open the file by that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
:return: Document itself after processed
"""
# Convert to (little-endian) 16 bit integers.
max_int16 = 2**15
tensor = (self.tensor * max_int16).astype('<h')
n_channels = 2 if self.tensor.ndim > 1 else 1
with wave.open(file, 'w') as f:
            # Mono or stereo, matching the tensor's shape.
            f.setnchannels(n_channels)
            # Sample width in bytes (default 2, i.e. 16-bit samples).
            f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(tensor.tobytes())
return self
def load_uri_to_audio_tensor(self: 'T') -> 'T':
"""Convert an audio :attr:`.uri` into :attr:`.tensor` inplace
:return: Document itself after processed
"""
ifile = wave.open(
self.uri
) #: note wave is Python built-in module https://docs.python.org/3/library/wave.html
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(np.float32)
# Normalise float32 array so that values are between -1.0 and +1.0
max_int16 = 2**15
audio_normalised = audio_as_np_float32 / max_int16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty((int(len(audio_normalised) / channels), channels))
audio_stereo[:, 0] = audio_normalised[range(0, len(audio_normalised), 2)]
audio_stereo[:, 1] = audio_normalised[range(1, len(audio_normalised), 2)]
self.tensor = audio_stereo
else:
self.tensor = audio_normalised
return self
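A hedged round-trip sketch for the mixin above, assuming docarray's ``Document`` exposes these helpers: synthesize a mono sine wave, save it to a wav file, then load it back from the uri.
import numpy as np
from docarray import Document
t = np.linspace(0, 1, 44100, dtype=np.float32)
doc = Document(tensor=0.5 * np.sin(2 * np.pi * 440 * t))  # one second of a 440 Hz tone
doc.save_audio_tensor_to_file('tone.wav')
doc.uri = 'tone.wav'
doc.load_uri_to_audio_tensor()
print(doc.tensor.shape, doc.tensor.dtype)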
|
import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
file: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> 'T':
"""Save :attr:`.tensor` into an wav file. Mono/stereo is preserved.
:param file: if file is a string, open the file by that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
:return: Document itself after processed
"""
# Convert to (little-endian) 16 bit integers.
max_int16 = 2**15
tensor = (self.tensor * max_int16).astype('<h')
n_channels = 2 if self.tensor.ndim > 1 else 1
with wave.open(file, 'w') as f:
            # Mono or stereo, matching the tensor's shape.
            f.setnchannels(n_channels)
            # Sample width in bytes (default 2, i.e. 16-bit samples).
            f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(tensor.tobytes())
return self
def load_uri_to_audio_tensor(self: 'T') -> 'T':
"""Convert an audio :attr:`.uri` into :attr:`.tensor` inplace
:return: Document itself after processed
"""
ifile = wave.open(
self.uri
) #: note wave is Python built-in module https://docs.python.org/3/library/wave.html
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(np.float32)
# Normalise float32 array so that values are between -1.0 and +1.0
max_int16 = 2**15
audio_normalised = audio_as_np_float32 / max_int16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty((int(len(audio_normalised) / channels), channels))
audio_stereo[:, 0] = audio_normalised[range(0, len(audio_normalised), 2)]
audio_stereo[:, 1] = audio_normalised[range(1, len(audio_normalised), 2)]
self.tensor = audio_stereo
else:
self.tensor = audio_normalised
return self
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
                if hasattr(evaluator, "primary_metric"):
                    scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from __future__ import annotations
import pytest
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler
from sentence_transformers.sampler import RoundRobinBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ... , 23, 24, 100, 101, ..., 123, 124],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
|
from __future__ import annotations
import pytest
from datasets import Dataset
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler
from sentence_transformers.sampler import RoundRobinBatchSampler
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ... , 23, 24, 100, 101, ..., 123, 124],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_sample(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled = da.sample(1)
assert len(sampled) == 1
sampled = da.sample(5)
assert len(sampled) == 5
assert isinstance(sampled, DocumentArray)
with pytest.raises(ValueError):
da.sample(101)  # cannot sample with k greater than the length of the DocumentArray.
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_sample_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled_1 = da.sample(5, seed=1)
sampled_2 = da.sample(5, seed=1)
sampled_3 = da.sample(5, seed=2)
assert len(sampled_1) == len(sampled_2) == len(sampled_3) == 5
assert sampled_1 == sampled_2
assert sampled_1 != sampled_3
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_shuffle(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled = da.shuffle()
assert len(shuffled) == len(da)
assert isinstance(shuffled, DocumentArray)
ids_before_shuffle = [d.id for d in da]
ids_after_shuffle = [d.id for d in shuffled]
assert ids_before_shuffle != ids_after_shuffle
assert sorted(ids_before_shuffle) == sorted(ids_after_shuffle)
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_shuffle_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled_1 = da.shuffle(seed=1)
shuffled_2 = da.shuffle(seed=1)
shuffled_3 = da.shuffle(seed=2)
assert len(shuffled_1) == len(shuffled_2) == len(shuffled_3) == len(da)
assert shuffled_1 == shuffled_2
assert shuffled_1 != shuffled_3
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_sample(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled = da.sample(1)
assert len(sampled) == 1
sampled = da.sample(5)
assert len(sampled) == 5
assert isinstance(sampled, DocumentArray)
with pytest.raises(ValueError):
da.sample(101)  # cannot sample with k greater than the length of the DocumentArray.
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_sample_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled_1 = da.sample(5, seed=1)
sampled_2 = da.sample(5, seed=1)
sampled_3 = da.sample(5, seed=2)
assert len(sampled_1) == len(sampled_2) == len(sampled_3) == 5
assert sampled_1 == sampled_2
assert sampled_1 != sampled_3
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_shuffle(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled = da.shuffle()
assert len(shuffled) == len(da)
assert isinstance(shuffled, DocumentArray)
ids_before_shuffle = [d.id for d in da]
ids_after_shuffle = [d.id for d in shuffled]
assert ids_before_shuffle != ids_after_shuffle
assert sorted(ids_before_shuffle) == sorted(ids_after_shuffle)
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_shuffle_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled_1 = da.shuffle(seed=1)
shuffled_2 = da.shuffle(seed=1)
shuffled_3 = da.shuffle(seed=2)
assert len(shuffled_1) == len(shuffled_2) == len(shuffled_3) == len(da)
assert shuffled_1 == shuffled_2
assert shuffled_1 != shuffled_3
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.utils.checkpoint import checkpoint
from ..builder import NECKS
@NECKS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
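# Hedged usage sketch (added for illustration, not part of the original module):
# the channel counts below are assumptions chosen only for the demo, roughly matching
# an HRNet backbone with four branches.
#
#   in_channels = [18, 36, 72, 144]
#   neck = HRFPN(in_channels=in_channels, out_channels=256, num_outs=5)
#   feats = [torch.randn(1, c, 64 // 2**i, 64 // 2**i) for i, c in enumerate(in_channels)]
#   outs = neck(feats)   # 5 pyramid levels, 256 channels each, spatial sizes 64, 32, 16, 8, 4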
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.utils.checkpoint import checkpoint
from ..builder import NECKS
@NECKS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
|
from typing import Any, Dict, Optional
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.azurecosmosnosql import AzureCosmosNoSqlKVStore
DEFAULT_INDEX_DATABASE = "IndexStoreDB"
DEFAULT_INDEX_CONTAINER = "IndexStoreContainer"
class AzureCosmosNoSqlIndexStore(BaseKVStore):
"""Creates an Azure Cosmos DB NoSql Index Store."""
def __init__(
self,
azure_cosmos_nosql_kvstore: AzureCosmosNoSqlKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Initializes the Azure Cosmos NoSql Index Store."""
super().__init__(azure_cosmos_nosql_kvstore, namespace, collection_suffix)
@classmethod
def from_connection_string(
cls,
connection_string: str,
index_db_name: str = DEFAULT_INDEX_DATABASE,
index_container_name: str = DEFAULT_INDEX_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlIndexStore":
"""Creates an instance of Azure Cosmos DB NoSql KV Store using a connection string."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_connection_string(
connection_string,
index_db_name,
index_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = index_db_name + "." + index_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_account_and_key(
cls,
endpoint: str,
key: str,
index_db_name: str = DEFAULT_INDEX_DATABASE,
index_container_name: str = DEFAULT_INDEX_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlIndexStore":
"""Creates an instance of Azure Cosmos DB NoSql KV Store using an account endpoint and key."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_account_and_key(
endpoint,
key,
index_db_name,
index_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = index_db_name + "." + index_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_aad_token(
cls,
endpoint: str,
index_db_name: str = DEFAULT_INDEX_DATABASE,
index_container_name: str = DEFAULT_INDEX_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlIndexStore":
"""Creates an instance of Azure Cosmos DB NoSql KV Store using an aad token."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_aad_token(
endpoint,
index_db_name,
index_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = index_db_name + "." + index_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
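# Hedged usage sketch (added for illustration; the endpoint and key below are placeholders):
#
#   index_store = AzureCosmosNoSqlIndexStore.from_account_and_key(
#       endpoint="https://<your-account>.documents.azure.com:443/",
#       key="<your-account-key>",
#   )
#
# The store can equally be built from a connection string or an AAD token via the other
# classmethods; in each case the data is namespaced as "<index_db_name>.<index_container_name>".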
|
from typing import Any, Dict, Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.azurecosmosnosql import AzureCosmosNoSqlKVStore
DEFAULT_INDEX_DATABASE = "IndexStoreDB"
DEFAULT_INDEX_CONTAINER = "IndexStoreContainer"
class AzureCosmosNoSqlIndexStore(KVIndexStore):
"""Creates an Azure Cosmos DB NoSql Index Store."""
def __init__(
self,
azure_cosmos_nosql_kvstore: AzureCosmosNoSqlKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Initializes the Azure Cosmos NoSql Index Store."""
super().__init__(azure_cosmos_nosql_kvstore, namespace, collection_suffix)
@classmethod
def from_connection_string(
cls,
connection_string: str,
index_db_name: str = DEFAULT_INDEX_DATABASE,
index_container_name: str = DEFAULT_INDEX_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlIndexStore":
"""Creates an instance of Azure Cosmos DB NoSql KV Store using a connection string."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_connection_string(
connection_string,
index_db_name,
index_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = index_db_name + "." + index_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_account_and_key(
cls,
endpoint: str,
key: str,
index_db_name: str = DEFAULT_INDEX_DATABASE,
index_container_name: str = DEFAULT_INDEX_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlIndexStore":
"""Creates an instance of Azure Cosmos DB NoSql KV Store using an account endpoint and key."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_account_and_key(
endpoint,
key,
index_db_name,
index_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = index_db_name + "." + index_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_aad_token(
cls,
endpoint: str,
index_db_name: str = DEFAULT_INDEX_DATABASE,
index_container_name: str = DEFAULT_INDEX_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlIndexStore":
"""Creates an instance of Azure Cosmos DB NoSql KV Store using an aad token."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_aad_token(
endpoint,
index_db_name,
index_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = index_db_name + "." + index_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
|
from .autograd_utils import use_deterministic_algorithms
from .case_utils import (
disabledInCI,
HttpServerMixin,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfNoSoxDecoder,
skipIfNoSoxEncoder,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxDecoder",
"skipIfNoSoxEncoder",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfNoSoxDecoder,
skipIfNoSoxEncoder,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxDecoder",
"skipIfNoSoxEncoder",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from pathlib import Path
import dask.array as da
import numpy as np
from distributed import Client, LocalCluster
from sklearn.datasets import load_svmlight_file
import lightgbm as lgb
if __name__ == "__main__":
print("loading data")
rank_example_dir = Path(__file__).absolute().parents[2] / "lambdarank"
X, y = load_svmlight_file(str(rank_example_dir / "rank.train"))
group = np.loadtxt(str(rank_example_dir / "rank.train.query"))
print("initializing a Dask cluster")
cluster = LocalCluster(n_workers=2)
client = Client(cluster)
print("created a Dask LocalCluster")
print("distributing training data on the Dask cluster")
# split training data into two partitions
rows_in_part1 = int(np.sum(group[:100]))
rows_in_part2 = X.shape[0] - rows_in_part1
num_features = X.shape[1]
# make this array dense because we're splitting across
# a sparse boundary to partition the data
X = X.toarray()
dX = da.from_array(x=X, chunks=[(rows_in_part1, rows_in_part2), (num_features,)])
dy = da.from_array(
x=y,
chunks=[
(rows_in_part1, rows_in_part2),
],
)
dg = da.from_array(x=group, chunks=[(100, group.size - 100)])
print("beginning training")
dask_model = lgb.DaskLGBMRanker(n_estimators=10)
dask_model.fit(dX, dy, group=dg)
assert dask_model.fitted_
print("done training")
|
from pathlib import Path
import dask.array as da
import numpy as np
from distributed import Client, LocalCluster
from sklearn.datasets import load_svmlight_file
import lightgbm as lgb
if __name__ == "__main__":
print("loading data")
rank_example_dir = Path(__file__).absolute().parents[2] / 'lambdarank'
X, y = load_svmlight_file(str(rank_example_dir / 'rank.train'))
group = np.loadtxt(str(rank_example_dir / 'rank.train.query'))
print("initializing a Dask cluster")
cluster = LocalCluster(n_workers=2)
client = Client(cluster)
print("created a Dask LocalCluster")
print("distributing training data on the Dask cluster")
# split training data into two partitions
rows_in_part1 = int(np.sum(group[:100]))
rows_in_part2 = X.shape[0] - rows_in_part1
num_features = X.shape[1]
# make this array dense because we're splitting across
# a sparse boundary to partition the data
X = X.toarray()
dX = da.from_array(
x=X,
chunks=[
(rows_in_part1, rows_in_part2),
(num_features,)
]
)
dy = da.from_array(
x=y,
chunks=[
(rows_in_part1, rows_in_part2),
]
)
dg = da.from_array(
x=group,
chunks=[
(100, group.size - 100)
]
)
print("beginning training")
dask_model = lgb.DaskLGBMRanker(n_estimators=10)
dask_model.fit(dX, dy, group=dg)
assert dask_model.fitted_
print("done training")
|
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
_underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
try:
cleaned_req = req.split(" ")[0]
except Exception: # In case parsing of requirement spec fails
continue
if cleaned_req.replace("-", "_") not in _underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
# Packages that do not start with "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg] + list(all_packages)
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
"""**sys_info** prints information about the system and langchain packages
for debugging purposes.
"""
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
_underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
try:
cleaned_req = req.split(" ")[0]
except Exception: # In case parsing of requirement spec fails
continue
if cleaned_req.replace("-", "_") not in _underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
# Packages that do not start with "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg] + list(all_packages)
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
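# Illustrative addition (not part of the original script): a trained CrossEncoder scores
# raw sentence pairs directly via predict(); the values lie in the 0...1 range learned above.
example_scores = model.predict([["A man is eating food.", "A man is eating a piece of bread."]])
print(example_scores)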
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
from typing import TYPE_CHECKING, Union
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
import trimesh
class Mesh:
FILE_EXTENSIONS = [
'glb',
'obj',
'ply',
]
VERTICES = 'vertices'
FACES = 'faces'
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def _load_mesh(
self, force: str = None
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""Load a trimesh.Mesh or trimesh.Scene object from :attr:`.uri`.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh. For 'scene'
try to coerce everything into a scene.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self.uri, force=force)
return mesh
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometry stored in one mesh file,
then store each geometry into different :attr:`.chunks`
:return: itself after processed
"""
if as_chunks:
import trimesh
from docarray.document import Document
# try to coerce everything into a scene
scene = self._load_mesh(force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = self._load_mesh(force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
def load_uri_to_vertices_and_faces(self: 'T') -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.chunks` as vertices and faces
:return: itself after processed
"""
from docarray.document import Document
mesh = self._load_mesh(force='mesh')
vertices = mesh.vertices.view(np.ndarray)
faces = mesh.faces.view(np.ndarray)
self.chunks = [
Document(name=Mesh.VERTICES, tensor=vertices),
Document(name=Mesh.FACES, tensor=faces),
]
return self
def load_vertices_and_faces_to_point_cloud(self: 'T', samples: int) -> 'T':
"""Convert a 3d mesh of vertices and faces from :attr:`.chunks` into point cloud :attr:`.tensor`
:param samples: number of points to sample from the mesh
:return: itself after processed
"""
vertices = None
faces = None
for chunk in self.chunks:
if chunk.tags['name'] == Mesh.VERTICES:
vertices = chunk.tensor
if chunk.tags['name'] == Mesh.FACES:
faces = chunk.tensor
if vertices is not None and faces is not None:
import trimesh
mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
self.tensor = np.array(mesh.sample(samples))
else:
raise AttributeError(
'Point cloud tensor can not be set, since vertices and faces chunk tensor have not been set.'
)
return self
def load_uris_to_rgbd_tensor(self: 'T') -> 'T':
"""Load RGB image from :attr:`.uri` of :attr:`.chunks[0]` and depth image from :attr:`.uri` of :attr:`.chunks[1]` and merge them into :attr:`.tensor`.
:return: itself after processed
"""
from PIL import Image
if len(self.chunks) != 2:
raise ValueError(
f'The provided Document does not have two chunks but instead {len(self.chunks)}. To load uris to RGBD tensor, the Document needs to have two chunks, with the first one providing the RGB image uri, and the second one providing the depth image uri.'
)
for chunk in self.chunks:
if chunk.uri == '':
raise ValueError(
'A chunk of the given Document does not provide a uri.'
)
rgb_img = np.array(Image.open(self.chunks[0].uri).convert('RGB'))
depth_img = np.array(Image.open(self.chunks[1].uri))
if rgb_img.shape[0:2] != depth_img.shape:
raise ValueError(
f'The provided RGB image and depth image are not of the same shapes: {rgb_img.shape[0:2]} != {depth_img.shape}'
)
self.tensor = np.concatenate(
(rgb_img, np.expand_dims(depth_img, axis=2)), axis=-1
)
return self
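# Hedged usage sketch (added for illustration; 'mesh.glb' is a placeholder path):
#
#   from docarray import Document
#
#   doc = Document(uri='mesh.glb')
#   doc.load_uri_to_point_cloud_tensor(samples=1000)       # doc.tensor -> (1000, 3) sampled points
#
#   doc = Document(uri='mesh.glb')
#   doc.load_uri_to_vertices_and_faces()                   # vertices/faces stored in doc.chunks
#   doc.load_vertices_and_faces_to_point_cloud(samples=1000)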
|
from typing import TYPE_CHECKING, Union
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
import trimesh
class Mesh:
FILE_EXTENSIONS = [
'glb',
'obj',
'ply',
]
VERTICES = 'vertices'
FACES = 'faces'
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def _load_mesh(
self, force: str = None
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""Load a trimesh.Mesh or trimesh.Scene object from :attr:`.uri`.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh. For 'scene'
try to coerce everything into a scene.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self.uri, force=force)
return mesh
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometry stored in one mesh file,
then store each geometry into different :attr:`.chunks`
:return: itself after processed
"""
if as_chunks:
import trimesh
from docarray.document import Document
# try to coerce everything into a scene
scene = self._load_mesh(force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = self._load_mesh(force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
def load_uri_to_vertices_and_faces(self: 'T') -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.chunks` as vertices and faces
:return: itself after processed
"""
from docarray.document import Document
mesh = self._load_mesh(force='mesh')
vertices = mesh.vertices.view(np.ndarray)
faces = mesh.faces.view(np.ndarray)
self.chunks = [
Document(name=Mesh.VERTICES, tensor=vertices),
Document(name=Mesh.FACES, tensor=faces),
]
return self
def load_vertices_and_faces_to_point_cloud(self: 'T', samples: int) -> 'T':
"""Convert a 3d mesh of vertices and faces from :attr:`.chunks` into point cloud :attr:`.tensor`
:param samples: number of points to sample from the mesh
:return: itself after processed
"""
vertices = None
faces = None
for chunk in self.chunks:
if chunk.tags['name'] == Mesh.VERTICES:
vertices = chunk.tensor
if chunk.tags['name'] == Mesh.FACES:
faces = chunk.tensor
if vertices is not None and faces is not None:
import trimesh
mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
self.tensor = np.array(mesh.sample(samples))
else:
raise AttributeError(
'Point cloud tensor can not be set, since vertices and faces chunk tensor have not been set.'
)
return self
|
# mypy: allow-untyped-defs
from typing import Callable, Optional, Union
import torch
from .base_structured_sparsifier import BaseStructuredSparsifier
__all__ = ["FPGMPruner"]
class FPGMPruner(BaseStructuredSparsifier):
r"""Filter Pruning via Geometric Median (FPGM) Structured Pruner
This sparsifier prunes filters (rows) in a tensor according to the distances among filters, as described in
`Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration <https://arxiv.org/abs/1811.00250>`_.
This sparsifier is controlled by two variables:
1. `sparsity_level` defines the number of filters (rows) that are zeroed-out.
2. `dist` defines the distance measurement type. Default: 2 (L2 distance).
Available options are: [1, 2, (custom callable distance function)].
Note::
Inputs should be a 4D convolutional tensor of shape (N, C, H, W).
- N: output channels size
- C: input channels size
- H: height of kernel
- W: width of kernel
"""
def __init__(
self, sparsity_level: float = 0.5, dist: Optional[Union[Callable, int]] = None
):
defaults = {
"sparsity_level": sparsity_level,
}
if dist is None:
dist = 2
if callable(dist):
self.dist_fn = dist
elif dist == 1:
self.dist_fn = lambda x: torch.cdist(x, x, p=1)
elif dist == 2:
self.dist_fn = lambda x: torch.cdist(x, x, p=2)
else:
raise NotImplementedError("Distance function is not yet implemented.")
super().__init__(defaults=defaults)
def _compute_distance(self, t):
r"""Compute distance across all entries in tensor `t` along all dimension
except for the one identified by dim.
Args:
t (torch.Tensor): tensor representing the parameter to prune
Returns:
distance (torch.Tensor): distance computed across filters
"""
dim = 0 # prune filter (row)
size = t.size(dim)
slc = [slice(None)] * t.dim()
# flatten the tensor along the dimension
t_flatten = [
t[tuple(slc[:dim] + [slice(i, i + 1)] + slc[dim + 1 :])].reshape(-1)
for i in range(size)
]
t_flatten = torch.stack(t_flatten)
# distance measurement
dist_matrix = self.dist_fn(t_flatten)
# a filter that is similar to the others has small pairwise distances, hence a small row sum; those are pruned first
distance = torch.sum(torch.abs(dist_matrix), 1)
return distance
def update_mask( # type: ignore[override]
self, module, tensor_name, sparsity_level, **kwargs
):
tensor_weight = getattr(module, tensor_name)
mask = getattr(module.parametrizations, tensor_name)[0].mask
if sparsity_level <= 0:
mask.data = torch.ones_like(mask).bool()
elif sparsity_level >= 1.0:
mask.data = torch.zeros_like(mask).bool()
else:
distance = self._compute_distance(tensor_weight)
tensor_size = tensor_weight.shape[0] # prune filter (row)
nparams_toprune = round(sparsity_level * tensor_size)
nparams_toprune = min(
max(nparams_toprune, 0), tensor_size
) # clamp to [0, tensor_size]
topk = torch.topk(distance, k=nparams_toprune, largest=False)
mask[topk.indices] = False
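# Hedged sketch of the underlying idea (added for illustration, not part of the original
# module): for a conv weight of shape (N, C, H, W), flatten each of the N filters, compute
# pairwise L2 distances, and prune the filters whose summed distance to all others is
# smallest, i.e. those closest to the geometric median:
#
#   flat = conv.weight.detach().reshape(conv.weight.size(0), -1)   # (N, C*H*W)
#   dist = torch.cdist(flat, flat, p=2)                            # (N, N)
#   scores = dist.abs().sum(dim=1)                                 # row sums, as in _compute_distance
#   prune_idx = torch.topk(scores, k=n_to_prune, largest=False).indices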
|
# mypy: allow-untyped-defs
from typing import Callable, Optional, Union
import torch
from .base_structured_sparsifier import BaseStructuredSparsifier
__all__ = ["FPGMPruner"]
class FPGMPruner(BaseStructuredSparsifier):
r"""Filter Pruning via Geometric Median (FPGM) Structured Pruner
This sparsifier prunes filters (rows) in a tensor according to the distances among filters, as described in
`Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration <https://arxiv.org/abs/1811.00250>`_.
This sparsifier is controlled by two variables:
1. `sparsity_level` defines the number of filters (rows) that are zeroed-out.
2. `dist` defines the distance measurement type. Default: 2 (L2 distance).
Available options are: [1, 2, (custom callable distance function)].
Note::
Inputs should be a 4D convolutional tensor of shape (N, C, H, W).
- N: output channels size
- C: input channels size
- H: height of kernel
- W: width of kernel
"""
def __init__(
self, sparsity_level: float = 0.5, dist: Optional[Union[Callable, int]] = None
):
defaults = {
"sparsity_level": sparsity_level,
}
if dist is None:
dist = 2
if callable(dist):
self.dist_fn = dist
elif dist == 1:
self.dist_fn = lambda x: torch.cdist(x, x, p=1)
elif dist == 2:
self.dist_fn = lambda x: torch.cdist(x, x, p=2)
else:
raise NotImplementedError("Distance function is not yet implemented.")
super().__init__(defaults=defaults)
def _compute_distance(self, t):
r"""Compute distance across all entries in tensor `t` along all dimension
except for the one identified by dim.
Args:
t (torch.Tensor): tensor representing the parameter to prune
Returns:
distance (torch.Tensor): distance computed across filters
"""
dim = 0 # prune filter (row)
size = t.size(dim)
slc = [slice(None)] * t.dim()
# flatten the tensor along the dimension
t_flatten = [
t[tuple(slc[:dim] + [slice(i, i + 1)] + slc[dim + 1 :])].reshape(-1)
for i in range(size)
]
t_flatten = torch.stack(t_flatten)
# distance measurement
dist_matrix = self.dist_fn(t_flatten)
# a filter that is similar to the others has small pairwise distances, hence a small row sum; those are pruned first
distance = torch.sum(torch.abs(dist_matrix), 1)
return distance
def update_mask( # type: ignore[override]
self, module, tensor_name, sparsity_level, **kwargs
):
tensor_weight = getattr(module, tensor_name)
mask = getattr(module.parametrizations, tensor_name)[0].mask
if sparsity_level <= 0:
mask.data = torch.ones_like(mask).bool()
elif sparsity_level >= 1.0:
mask.data = torch.zeros_like(mask).bool()
else:
distance = self._compute_distance(tensor_weight)
tensor_size = tensor_weight.shape[0] # prune filter (row)
nparams_toprune = round(sparsity_level * tensor_size)
nparams_toprune = min(
max(nparams_toprune, 0), tensor_size
) # clamp to [0, tensor_size]
topk = torch.topk(distance, k=nparams_toprune, largest=False)
mask[topk.indices] = False
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_spark_sql_agent",
]
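# Hedged usage note (added for illustration): importing `create_spark_sql_agent` from this
# legacy module is resolved lazily at attribute access through DEPRECATED_LOOKUP, which
# forwards to langchain_community and emits a deprecation warning, e.g.
#
#   from langchain.agents.agent_toolkits import spark_sql  # hypothetical import path
#   agent = spark_sql.base.create_spark_sql_agent          # triggers __getattr__ above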
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_spark_sql_agent",
]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeRaw op from parsing_ops."""
import gzip
import io
import zlib
import zstandard as zstd
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeCompressedOpTest(test.TestCase):
def _compress(self, bytes_in, compression_type):
if not compression_type:
return bytes_in
elif compression_type == "ZLIB":
return zlib.compress(bytes_in)
elif compression_type == "ZSTD":
return zstd.compress(bytes_in)
else:
out = io.BytesIO()
with gzip.GzipFile(fileobj=out, mode="wb") as f:
f.write(bytes_in)
return out.getvalue()
def testDecompressShapeInference(self):
with ops.Graph().as_default():
for compression_type in ["ZLIB", "GZIP", "ZSTD", ""]:
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
self.assertEqual([2], decompressed.get_shape().as_list())
def testDecompress(self):
for compression_type in ["ZLIB", "GZIP", "ZSTD", ""]:
with self.cached_session():
def decode(in_bytes, compression_type=compression_type):
return parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
in_val = [self._compress(b"AaAA", compression_type),
self._compress(b"bBbb", compression_type)]
result = self.evaluate(decode(in_val))
self.assertAllEqual([b"AaAA", b"bBbb"], result)
def testDecompressWithRaw(self):
for compression_type in ["ZLIB", "GZIP", "ZSTD", ""]:
with self.cached_session():
def decode(in_bytes, compression_type=compression_type):
decompressed = parsing_ops.decode_compressed(in_bytes,
compression_type)
return parsing_ops.decode_raw(decompressed, out_type=dtypes.int16)
result = self.evaluate(
decode([self._compress(b"AaBC", compression_type)]))
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
if __name__ == "__main__":
test.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeRaw op from parsing_ops."""
import gzip
import io
import zlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeCompressedOpTest(test.TestCase):
def _compress(self, bytes_in, compression_type):
if not compression_type:
return bytes_in
elif compression_type == "ZLIB":
return zlib.compress(bytes_in)
else:
out = io.BytesIO()
with gzip.GzipFile(fileobj=out, mode="wb") as f:
f.write(bytes_in)
return out.getvalue()
def testDecompressShapeInference(self):
with ops.Graph().as_default():
for compression_type in ["ZLIB", "GZIP", ""]:
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
self.assertEqual([2], decompressed.get_shape().as_list())
def testDecompress(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.cached_session():
def decode(in_bytes, compression_type=compression_type):
return parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
in_val = [self._compress(b"AaAA", compression_type),
self._compress(b"bBbb", compression_type)]
result = self.evaluate(decode(in_val))
self.assertAllEqual([b"AaAA", b"bBbb"], result)
def testDecompressWithRaw(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.cached_session():
def decode(in_bytes, compression_type=compression_type):
decompressed = parsing_ops.decode_compressed(in_bytes,
compression_type)
return parsing_ops.decode_raw(decompressed, out_type=dtypes.int16)
result = self.evaluate(
decode([self._compress(b"AaBC", compression_type)]))
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
if __name__ == "__main__":
test.main()
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
n_components: Optional[int] = None
columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for key in columns.keys():
columns[key] = self._map_type(columns[key])
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
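# Hedged usage sketch (standalone, not part of this mixin): this backend is normally
# reached through the docarray v1 `DocumentArray` API. Assumes `docarray<2` and
# `annlite` are installed; the config keys mirror the AnnliteConfig fields above.
if __name__ == '__main__':
    from docarray import Document, DocumentArray

    da = DocumentArray(
        storage='annlite',
        config={'n_dim': 128, 'metric': 'cosine', 'columns': {'price': 'float'}},
    )
    da.extend(Document(embedding=[0.0] * 128, tags={'price': float(i)}) for i in range(10))
    print(len(da))  # delegates to __len__ above, i.e. AnnLite.index_size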
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for key in columns.keys():
columns[key] = self._map_type(columns[key])
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
import logging
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import prisma
import backend.data.graph
import backend.integrations.creds_manager
import backend.integrations.webhooks.graph_lifecycle_hooks
import backend.server.v2.library.db
import backend.server.v2.library.model
logger = logging.getLogger(__name__)
router = fastapi.APIRouter()
integration_creds_manager = (
backend.integrations.creds_manager.IntegrationCredentialsManager()
)
@router.get(
"/agents",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
)
async def get_library_agents(
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
]
) -> typing.Sequence[backend.server.v2.library.model.LibraryAgent]:
"""
Get all agents in the user's library, including both created and saved agents.
"""
try:
agents = await backend.server.v2.library.db.get_library_agents(user_id)
return agents
except Exception:
logger.exception("Exception occurred whilst getting library agents")
raise fastapi.HTTPException(
status_code=500, detail="Failed to get library agents"
)
@router.post(
"/agents/{store_listing_version_id}",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
status_code=201,
)
async def add_agent_to_library(
store_listing_version_id: str,
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
],
) -> fastapi.Response:
"""
Add an agent from the store to the user's library.
Args:
store_listing_version_id (str): ID of the store listing version to add
user_id (str): ID of the authenticated user
Returns:
fastapi.Response: 201 status code on success
Raises:
HTTPException: If there is an error adding the agent to the library
"""
try:
# Get the graph from the store listing
store_listing_version = (
await prisma.models.StoreListingVersion.prisma().find_unique(
where={"id": store_listing_version_id}, include={"Agent": True}
)
)
if not store_listing_version or not store_listing_version.Agent:
raise fastapi.HTTPException(
status_code=404,
detail=f"Store listing version {store_listing_version_id} not found",
)
agent = store_listing_version.Agent
if agent.userId == user_id:
raise fastapi.HTTPException(
status_code=400, detail="Cannot add own agent to library"
)
# Create a new graph from the template
graph = await backend.data.graph.get_graph(
agent.id, agent.version, user_id=user_id
)
if not graph:
raise fastapi.HTTPException(
status_code=404, detail=f"Agent {agent.id} not found"
)
# Create a deep copy with new IDs
graph.version = 1
graph.is_template = False
graph.is_active = True
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
# Save the new graph
graph = await backend.data.graph.create_graph(graph, user_id=user_id)
graph = (
await backend.integrations.webhooks.graph_lifecycle_hooks.on_graph_activate(
graph,
get_credentials=lambda id: integration_creds_manager.get(user_id, id),
)
)
return fastapi.Response(status_code=201)
except Exception:
logger.exception("Exception occurred whilst adding agent to library")
raise fastapi.HTTPException(
status_code=500, detail="Failed to add agent to library"
)
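# Hedged client-side sketch (standalone): how these endpoints might be called once the
# router is mounted. The base URL, route prefix, and bearer token are assumptions;
# adjust them to the actual FastAPI application and auth setup.
if __name__ == "__main__":
    import httpx

    headers = {"Authorization": "Bearer <jwt-token>"}
    resp = httpx.get("http://localhost:8006/api/library/agents", headers=headers)
    resp.raise_for_status()
    print(resp.json())  # LibraryAgent entries for the authenticated user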
|
import logging
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import prisma
import backend.data.graph
import backend.integrations.creds_manager
import backend.integrations.webhooks.graph_lifecycle_hooks
import backend.server.v2.library.db
import backend.server.v2.library.model
logger = logging.getLogger(__name__)
router = fastapi.APIRouter()
integration_creds_manager = (
backend.integrations.creds_manager.IntegrationCredentialsManager()
)
@router.get(
"/agents",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
)
async def get_library_agents(
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
]
) -> typing.Sequence[backend.server.v2.library.model.LibraryAgent]:
"""
Get all agents in the user's library, including both created and saved agents.
"""
try:
agents = await backend.server.v2.library.db.get_library_agents(user_id)
return agents
except Exception:
logger.exception("Exception occurred whilst getting library agents")
raise fastapi.HTTPException(
status_code=500, detail="Failed to get library agents"
)
@router.post(
"/agents/{store_listing_version_id}",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
status_code=201,
)
async def add_agent_to_library(
store_listing_version_id: str,
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
],
) -> fastapi.Response:
"""
Add an agent from the store to the user's library.
Args:
store_listing_version_id (str): ID of the store listing version to add
user_id (str): ID of the authenticated user
Returns:
fastapi.Response: 201 status code on success
Raises:
HTTPException: If there is an error adding the agent to the library
"""
try:
# Get the graph from the store listing
store_listing_version = (
await prisma.models.StoreListingVersion.prisma().find_unique(
where={"id": store_listing_version_id}, include={"Agent": True}
)
)
if not store_listing_version or not store_listing_version.Agent:
raise fastapi.HTTPException(
status_code=404,
detail=f"Store listing version {store_listing_version_id} not found",
)
agent = store_listing_version.Agent
if agent.userId == user_id:
raise fastapi.HTTPException(
status_code=400, detail="Cannot add own agent to library"
)
# Create a new graph from the template
graph = await backend.data.graph.get_graph(
agent.id, agent.version, template=True, user_id=user_id
)
if not graph:
raise fastapi.HTTPException(
status_code=404, detail=f"Agent {agent.id} not found"
)
# Create a deep copy with new IDs
graph.version = 1
graph.is_template = False
graph.is_active = True
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
# Save the new graph
graph = await backend.data.graph.create_graph(graph, user_id=user_id)
graph = (
await backend.integrations.webhooks.graph_lifecycle_hooks.on_graph_activate(
graph,
get_credentials=lambda id: integration_creds_manager.get(user_id, id),
)
)
return fastapi.Response(status_code=201)
except Exception:
logger.exception("Exception occurred whilst adding agent to library")
raise fastapi.HTTPException(
status_code=500, detail="Failed to add agent to library"
)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.12,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
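# Hedged sketch (run as a separate script, not as part of this config): inspecting the
# assembled config with the mmcv 1.x `Config` API that this generation of MMDetection
# configs is built on. The config file path is an assumption.
if __name__ == '__main__':
    from mmcv import Config

    cfg = Config.fromfile('configs/yolof/yolof_r50_c5_8x8_1x_coco.py')
    print(cfg.model.bbox_head.num_classes)    # 80
    print(cfg.optimizer.lr)                   # 0.12
    print(cfg.auto_scale_lr.base_batch_size)  # 64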
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.12,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# TODO support auto_scale_lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Iterable, Union
from docarray import Document, DocumentArray
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Redis as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DA are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.client_info() == other._client.client_info()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Redis as storage
:return: the length of this :class:`DocumentArrayRedis` object
"""
if self._list_like:
return len(self._offset2ids)
try:
lua_script = f'return #redis.pcall("keys", "{self._config.index_name}:*")'
cmd = self._client.register_script(lua_script)
return cmd()
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Redis storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayRedis` object
:return: string representation of this object
"""
return f'<DocumentArray[Redis] (length={len(self)}) at {id(self)}>'
def _upload_batch(self, batch_of_docs: Iterable['Document']):
pipe = self._client.pipeline()
for doc in batch_of_docs:
payload = self._document_to_redis(doc)
pipe.hset(self._doc_prefix + doc.id, mapping=payload)
pipe.execute()
def _extend(self, docs: Iterable['Document']):
da = DocumentArray(docs)
for batch_of_docs in da.batch(self._config.batch_size):
self._upload_batch(batch_of_docs)
if self._list_like:
self._offset2ids.extend(batch_of_docs[:, 'id'])
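# Hedged usage sketch (standalone, not part of this mixin): the sequence-like methods
# above are exercised through the docarray v1 `DocumentArray` API. Assumes `docarray<2`
# plus the `redis` client are installed and a Redis Stack server listens on localhost:6379.
if __name__ == '__main__':
    from docarray import Document, DocumentArray

    da = DocumentArray(storage='redis', config={'n_dim': 64, 'index_name': 'demo'})
    da.extend(Document(embedding=[0.0] * 64) for _ in range(3))
    print(len(da))           # __len__: offset2ids length (or a Redis key scan)
    print(Document() in da)  # __contains__: False for an unknown document id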
|
from typing import Iterable, Union
from docarray import Document, DocumentArray
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Redis as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DA are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.client_info() == other._client.client_info()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Redis as storage
:return: the length of this :class:`DocumentArrayRedis` object
"""
try:
return len(self._offset2ids)
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Redis storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayRedis` object
:return: string representation of this object
"""
return f'<DocumentArray[Redis] (length={len(self)}) at {id(self)}>'
def _upload_batch(self, batch_of_docs: Iterable['Document']):
pipe = self._client.pipeline()
for doc in batch_of_docs:
payload = self._document_to_redis(doc)
pipe.hset(self._doc_prefix + doc.id, mapping=payload)
pipe.execute()
def _extend(self, docs: Iterable['Document']):
da = DocumentArray(docs)
for batch_of_docs in da.batch(self._config.batch_size):
self._upload_batch(batch_of_docs)
if self._list_like:
self._offset2ids.extend(batch_of_docs[:, 'id'])
|
"""langchain-core version information and utilities."""
VERSION = "0.3.64"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.63"
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
import re
import sys
meetup_svg = '.github/images/meetup.svg'
readme_md = 'README.md'
conf_py = 'docs/conf.py'
def rm_announce():
    # remove all announcements
with open(readme_md, encoding='utf-8') as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w', encoding='utf-8') as fp:
fp.write(_new)
with open(conf_py, encoding='utf-8') as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w', encoding='utf-8') as fp:
fp.write(_new)
if len(sys.argv) < 3:
rm_announce()
else:
text = sys.argv[1]
url = sys.argv[2]
if not text or not url:
rm_announce()
else:
announce_url = f'''
"announcement": \'\'\'
<a href="{url}">{text}</a>
\'\'\',
'''
meetup_svg_url = f'<a href="{url}"><img src="https://github.com/jina-ai/jina/blob/master/{meetup_svg}?raw=true"></a>'
# update meetup_svg
with open(meetup_svg, encoding='utf-8') as fp:
_old = fp.read()
_new = re.sub(r'(<a href=").*(")', rf'\g<1>{url}\g<2>', _old)
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{text}\g<2>',
_new,
flags=re.DOTALL,
)
with open(meetup_svg, 'w', encoding='utf-8') as fp:
fp.write(_new)
# update readme_md
with open(readme_md, encoding='utf-8') as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{meetup_svg_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w', encoding='utf-8') as fp:
fp.write(_new)
# update conf
with open(conf_py, encoding='utf-8') as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>{announce_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w', encoding='utf-8') as fp:
fp.write(_new)
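# Standalone sketch of the marker-based substitution used above: the capture groups keep
# the start/end markers and only the text between them is replaced.
if __name__ == '__main__':
    sample = '<!--startmsg-->\nold banner\n<!--endmsg-->'
    updated = re.sub(
        r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
        r'\g<1>new banner\g<2>',
        sample,
        flags=re.DOTALL,
    )
    print(updated)  # '<!--startmsg-->\nnew banner\n<!--endmsg-->'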
|
import re
import sys
meetup_svg = '.github/images/meetup.svg'
readme_md = 'README.md'
conf_py = 'docs/conf.py'
def rm_announce():
    # remove all announcements
with open(readme_md) as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w') as fp:
fp.write(_new)
with open(conf_py) as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w') as fp:
fp.write(_new)
if len(sys.argv) < 3:
rm_announce()
else:
text = sys.argv[1]
url = sys.argv[2]
if not text or not url:
rm_announce()
else:
announce_url = f'''
"announcement": \'\'\'
<a href="{url}">{text}</a>
\'\'\',
'''
meetup_svg_url = f'<a href="{url}"><img src="https://github.com/jina-ai/jina/blob/master/{meetup_svg}?raw=true"></a>'
# update meetup_svg
with open(meetup_svg) as fp:
_old = fp.read()
_new = re.sub(r'(<a href=").*(")', rf'\g<1>{url}\g<2>', _old)
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{text}\g<2>',
_new,
flags=re.DOTALL,
)
with open(meetup_svg, 'w') as fp:
fp.write(_new)
# update readme_md
with open(readme_md) as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{meetup_svg_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w') as fp:
fp.write(_new)
# update conf
with open(conf_py) as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>{announce_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w') as fp:
fp.write(_new)
|
"""langchain-core version information and utilities."""
VERSION = "0.3.60"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.59"
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
    # 5. Encode the queries using full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
    # 5. Encode the queries using full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings_decoded,
corpus_index=corpus_index,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoJaxArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_jaxarray')
class VideoJaxArray(JaxArray, VideoTensorMixin, metaclass=metaJax):
""" """
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoJaxArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_jaxarray')
class VideoJaxArray(JaxArray, VideoTensorMixin, metaclass=metaJax):
""" """
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
"input_size, input_iterable, expected_output",
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
def test_batch_iterate(
input_size: int,
input_iterable: list[str],
expected_output: list[str],
) -> None:
"""Test batching function."""
assert list(batch_iterate(input_size, input_iterable)) == expected_output
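# Standalone usage sketch of the function under test: batch_iterate(size, iterable)
# lazily yields lists of at most `size` items.
if __name__ == "__main__":
    for chunk in batch_iterate(2, ["a", "b", "c", "d", "e"]):
        print(chunk)  # ['a', 'b'], then ['c', 'd'], then ['e']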
|
import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
"input_size, input_iterable, expected_output",
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
def test_batch_iterate(
input_size: int, input_iterable: list[str], expected_output: list[str]
) -> None:
"""Test batching function."""
assert list(batch_iterate(input_size, input_iterable)) == expected_output
|
from __future__ import annotations
import sys
from .classification import CrossEncoderClassificationEvaluator
from .correlation import CrossEncoderCorrelationEvaluator
from .deprecated import (
CEBinaryAccuracyEvaluator,
CEBinaryClassificationEvaluator,
CECorrelationEvaluator,
CEF1Evaluator,
CERerankingEvaluator,
CESoftmaxAccuracyEvaluator,
)
from .nano_beir import CrossEncoderNanoBEIREvaluator
from .reranking import CrossEncoderRerankingEvaluator
# Ensure that imports using deprecated paths still work
# Although importing via `from sentence_transformers.cross_encoder.evaluation import ...` is recommended
deprecated_modules = [
"sentence_transformers.cross_encoder.evaluation.CEBinaryAccuracyEvaluator",
"sentence_transformers.cross_encoder.evaluation.CEBinaryClassificationEvaluator",
"sentence_transformers.cross_encoder.evaluation.CEF1Evaluator",
"sentence_transformers.cross_encoder.evaluation.CESoftmaxAccuracyEvaluator",
"sentence_transformers.cross_encoder.evaluation.CECorrelationEvaluator",
"sentence_transformers.cross_encoder.evaluation.CERerankingEvaluator",
]
for module in deprecated_modules:
sys.modules[module] = sys.modules["sentence_transformers.cross_encoder.evaluation.deprecated"]
__all__ = [
"CrossEncoderClassificationEvaluator",
"CrossEncoderCorrelationEvaluator",
"CrossEncoderRerankingEvaluator",
"CrossEncoderNanoBEIREvaluator",
# Deprecated:
"CERerankingEvaluator",
"CECorrelationEvaluator",
"CEBinaryAccuracyEvaluator",
"CEBinaryClassificationEvaluator",
"CEF1Evaluator",
"CESoftmaxAccuracyEvaluator",
]
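# Hedged sketch (standalone): after the aliasing above, the recommended import path and
# the deprecated module path should both resolve, the latter through `deprecated`.
if __name__ == "__main__":
    from sentence_transformers.cross_encoder.evaluation import (
        CrossEncoderCorrelationEvaluator,
    )
    from sentence_transformers.cross_encoder.evaluation.CECorrelationEvaluator import (
        CECorrelationEvaluator,
    )

    print(CrossEncoderCorrelationEvaluator, CECorrelationEvaluator)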
|
from __future__ import annotations
# TODO: Consider renaming all evaluators to CrossEncoder..., e.g. CrossEncoderNanoBEIREvaluator, CrossEncoderClassificationEvaluator, etc.
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CEClassificationEvaluator import CEClassificationEvaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CENanoBEIREvaluator import CENanoBEIREvaluator
from .CERerankingEvaluator import CERerankingEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
__all__ = [
"CEClassificationEvaluator",
"CECorrelationEvaluator",
"CERerankingEvaluator",
"CENanoBEIREvaluator",
"CEBinaryAccuracyEvaluator", # Deprecated
"CEBinaryClassificationEvaluator", # Deprecated
"CEF1Evaluator", # Deprecated
"CESoftmaxAccuracyEvaluator", # Deprecated
]
|
_base_ = './cascade-rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
"""Dump objects to json."""
import json
from typing import Any
from pydantic import BaseModel
from langchain_core.load.serializable import Serializable, to_json_not_implemented
def default(obj: Any) -> Any:
"""Return a default value for an object.
Args:
obj: The object to serialize to json if it is a Serializable object.
Returns:
A json serializable object or a SerializedNotImplemented object.
"""
if isinstance(obj, Serializable):
return obj.to_json()
return to_json_not_implemented(obj)
def _dump_pydantic_models(obj: Any) -> Any:
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
if (
isinstance(obj, ChatGeneration)
and isinstance(obj.message, AIMessage)
and (parsed := obj.message.additional_kwargs.get("parsed"))
and isinstance(parsed, BaseModel)
):
obj_copy = obj.model_copy(deep=True)
obj_copy.message.additional_kwargs["parsed"] = parsed.model_dump()
return obj_copy
return obj
def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
"""Return a json string representation of an object.
Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
kwargs: Additional arguments to pass to json.dumps
Returns:
A json string representation of the object.
Raises:
ValueError: If `default` is passed as a kwarg.
"""
if "default" in kwargs:
msg = "`default` should not be passed to dumps"
raise ValueError(msg)
try:
obj = _dump_pydantic_models(obj)
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(obj, default=default, indent=indent, **kwargs)
return json.dumps(obj, default=default, **kwargs)
except TypeError:
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)
return json.dumps(to_json_not_implemented(obj), **kwargs)
def dumpd(obj: Any) -> Any:
"""Return a dict representation of an object.
Note:
Unfortunately this function is not as efficient as it could be
because it first dumps the object to a json string and then loads it
back into a dictionary.
Args:
obj: The object to dump.
Returns:
dictionary that can be serialized to json using json.dumps
"""
return json.loads(dumps(obj))
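# Standalone usage sketch: plain JSON-serializable objects pass straight through
# json.dumps, Serializable objects are expanded via their to_json() payload, and
# anything else falls back to a "not implemented" placeholder.
if __name__ == "__main__":
    print(dumps({"plain": "dict"}, pretty=True))
    print(dumpd({"plain": "dict"}))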
|
"""Dump objects to json."""
import json
from typing import Any
from pydantic import BaseModel
from langchain_core.load.serializable import Serializable, to_json_not_implemented
def default(obj: Any) -> Any:
"""Return a default value for an object.
Args:
obj: The object to serialize to json if it is a Serializable object.
Returns:
A json serializable object or a SerializedNotImplemented object.
"""
if isinstance(obj, Serializable):
return obj.to_json()
else:
return to_json_not_implemented(obj)
def _dump_pydantic_models(obj: Any) -> Any:
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
if (
isinstance(obj, ChatGeneration)
and isinstance(obj.message, AIMessage)
and (parsed := obj.message.additional_kwargs.get("parsed"))
and isinstance(parsed, BaseModel)
):
obj_copy = obj.model_copy(deep=True)
obj_copy.message.additional_kwargs["parsed"] = parsed.model_dump()
return obj_copy
else:
return obj
def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
"""Return a json string representation of an object.
Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
kwargs: Additional arguments to pass to json.dumps
Returns:
A json string representation of the object.
Raises:
ValueError: If `default` is passed as a kwarg.
"""
if "default" in kwargs:
msg = "`default` should not be passed to dumps"
raise ValueError(msg)
try:
obj = _dump_pydantic_models(obj)
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(obj, default=default, indent=indent, **kwargs)
else:
return json.dumps(obj, default=default, **kwargs)
except TypeError:
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)
else:
return json.dumps(to_json_not_implemented(obj), **kwargs)
def dumpd(obj: Any) -> Any:
"""Return a dict representation of an object.
Note:
Unfortunately this function is not as efficient as it could be
because it first dumps the object to a json string and then loads it
back into a dictionary.
Args:
obj: The object to dump.
Returns:
dictionary that can be serialized to json using json.dumps
"""
return json.loads(dumps(obj))
|
import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway.http_fastapi_app import get_fastapi_app
from jina.serve.runtimes.gateway.streamer import GatewayStreamer
JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layout/colored/Product%20logo_Core_vertical_colorful%402x-margin.png'
GATEWAY_SCHEMA_FILENAME = 'gateway.json'
args = set_gateway_parser().parse_args([])
logger = JinaLogger('')
graph_description = json.loads(args.graph_description)
graph_conditions = json.loads(args.graph_conditions)
deployments_addresses = json.loads(args.deployments_addresses)
deployments_no_reduce = json.loads(args.deployments_no_reduce)
streamer = GatewayStreamer(
graph_representation=graph_description,
executor_addresses=deployments_addresses,
graph_conditions=graph_conditions,
deployments_no_reduce=deployments_no_reduce,
timeout_send=args.timeout_send,
retries=args.retries,
compression=args.compression,
runtime_name=args.name,
prefetch=args.prefetch,
logger=logger,
)
gateway_app = get_fastapi_app(
streamer=streamer,
title=args.title,
description=args.description,
no_debug_endpoints=args.no_debug_endpoints,
no_crud_endpoints=args.no_crud_endpoints,
expose_endpoints=args.expose_endpoints,
expose_graphql_endpoint=args.expose_graphql_endpoint,
cors=args.cors,
logger=logger,
)
gateway_schema = gateway_app.openapi()
gateway_schema['info']['x-logo'] = {'url': JINA_LOGO_URL}
gateway_schema['servers'] = []
gateway_schema['servers'].append(
{'url': f'http://localhost:{args.port}', 'description': 'Local Jina gateway'}
)
with open(GATEWAY_SCHEMA_FILENAME, 'w', encoding='utf-8') as f:
json.dump(gateway_schema, f)
|
import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway.http_fastapi_app import get_fastapi_app
from jina.serve.runtimes.gateway.streamer import GatewayStreamer
JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layout/colored/Product%20logo_Core_vertical_colorful%402x-margin.png'
GATEWAY_SCHEMA_FILENAME = 'gateway.json'
args = set_gateway_parser().parse_args([])
logger = JinaLogger('')
graph_description = json.loads(args.graph_description)
graph_conditions = json.loads(args.graph_conditions)
deployments_addresses = json.loads(args.deployments_addresses)
deployments_no_reduce = json.loads(args.deployments_no_reduce)
streamer = GatewayStreamer(
graph_representation=graph_description,
executor_addresses=deployments_addresses,
graph_conditions=graph_conditions,
deployments_no_reduce=deployments_no_reduce,
timeout_send=args.timeout_send,
retries=args.retries,
compression=args.compression,
runtime_name=args.name,
prefetch=args.prefetch,
logger=logger,
)
gateway_app = get_fastapi_app(
streamer=streamer,
title=args.title,
description=args.description,
no_debug_endpoints=args.no_debug_endpoints,
no_crud_endpoints=args.no_crud_endpoints,
expose_endpoints=args.expose_endpoints,
expose_graphql_endpoint=args.expose_graphql_endpoint,
cors=args.cors,
logger=logger,
)
gateway_schema = gateway_app.openapi()
gateway_schema['info']['x-logo'] = {'url': JINA_LOGO_URL}
gateway_schema['servers'] = []
gateway_schema['servers'].append(
{'url': f'http://localhost:{args.port}', 'description': 'Local Jina gateway'}
)
with open(GATEWAY_SCHEMA_FILENAME, 'w') as f:
json.dump(gateway_schema, f)
|
_base_ = './htc_hrnetv2p_w40_20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
|
_base_ = './htc_hrnetv2p_w40_20e_coco.py'
# learning policy
lr_config = dict(step=[24, 27])
runner = dict(type='EpochBasedRunner', max_epochs=28)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
Reducing 2 DocLists consists in adding Documents in the second DocList
to the first DocList if they do not exist.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
casted = left.doc_type(**doc.__dict__)
left.append(casted)
return left
def reduce_all(docs: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
If a Document exists (identified by their ID) in many DocLists,
data properties are merged with priority to the left-most
DocLists (that is, if a data attribute is set in a Document
belonging to many DocLists, the attribute value of the left-most
DocList is kept).
Nested DocLists belonging to many DocLists
are also reduced in the same way.
!!! note
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docs: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docs) <= 1:
raise Exception(
            'In order to reduce DocLists we should have more than one DocList'
)
left = docs[0]
others = docs[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for other_docs in others:
reduce(left, other_docs, left_id_map)
return left
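# --- Usage sketch (illustrative only, not part of the original module) ---
# Two DocLists sharing the Document with id '1' are merged; the unseen id '3' is appended.
if __name__ == "__main__":
    from docarray import BaseDoc

    class MyDoc(BaseDoc):
        text: str = ''

    a = DocList[MyDoc]([MyDoc(id='1', text='left'), MyDoc(id='2', text='only-left')])
    b = DocList[MyDoc]([MyDoc(id='1', text='right'), MyDoc(id='3', text='only-right')])
    merged = reduce_all([a, b])
    print([d.id for d in merged])  # -> ['1', '2', '3']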
|
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
Reducing 2 DocLists consists in adding Documents in the second DocList
to the first DocList if they do not exist.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
casted = left.doc_type(**doc.__dict__)
left.append(casted)
return left
def reduce_all(docs: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
If a Document exists (identified by their ID) in many DocLists,
data properties are merged with priority to the left-most
DocLists (that is, if a data attribute is set in a Document
belonging to many DocLists, the attribute value of the left-most
DocList is kept).
Nested DocLists belonging to many DocLists
are also reduced in the same way.
!!! note
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docs: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docs) <= 1:
raise Exception(
            'In order to reduce DocLists we should have more than one DocList'
)
left = docs[0]
others = docs[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for other_docs in others:
reduce(left, other_docs, left_id_map)
return left
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
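# --- Usage sketch (illustrative only, not part of the original module) ---
# Attribute access on the package object triggers the module-level __getattr__ above,
# so each parser submodule is only imported on first use:
#
#   import langchain_core.output_parsers as output_parsers
#   parser = output_parsers.StrOutputParser()  # imports .string on demand
#   parser.parse("hello")                      # -> "hello"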
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import JsonOutputParser, SimpleJsonOutputParser
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
|
import json
from typing import Any, Callable, Iterator, List, Mapping, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
RecordHandler = Callable[[Any, Optional[str]], Document]
class AirbyteCDKReader(BaseReader):
"""
AirbyteCDKReader reader.
Retrieve documents from an Airbyte source implemented using the CDK.
Args:
source_class: The Airbyte source class.
config: The config object for the Airbyte source.
"""
def __init__(
self,
source_class: Any,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
from airbyte_cdk.models.airbyte_protocol import AirbyteRecordMessage
from airbyte_cdk.sources.embedded.base_integration import (
BaseEmbeddedIntegration,
)
from airbyte_cdk.sources.embedded.runner import CDKRunner
class CDKIntegration(BaseEmbeddedIntegration):
def _handle_record(
self, record: AirbyteRecordMessage, id: Optional[str]
) -> Document:
if record_handler:
return record_handler(record, id)
return Document(
doc_id=id, text=json.dumps(record.data), extra_info=record.data
)
self._integration = CDKIntegration(
config=config,
runner=CDKRunner(source=source_class(), name=source_class.__name__),
)
def load_data(self, *args: Any, **kwargs: Any) -> List[Document]:
return list(self.lazy_load_data(*args, **kwargs))
def lazy_load_data(self, *args: Any, **kwargs: Any) -> Iterator[Document]:
return self._integration._load_data(*args, **kwargs)
@property
def last_state(self):
return self._integration.last_state
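# --- Usage sketch (illustrative only; the source class and config keys are placeholders) ---
#
#   from source_github.source import SourceGithub  # any source built on the Airbyte CDK
#
#   reader = AirbyteCDKReader(
#       source_class=SourceGithub,
#       config={"credentials": {"personal_access_token": "<token>"}, "repositories": ["org/repo"]},
#   )
#   docs = reader.load_data(stream_name="issues")  # args are forwarded to the CDK integration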
|
import json
from typing import Any, Callable, Iterator, List, Mapping, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
RecordHandler = Callable[[Any, Optional[str]], Document]
class AirbyteCDKReader(BaseReader):
"""AirbyteCDKReader reader.
Retrieve documents from an Airbyte source implemented using the CDK.
Args:
source_class: The Airbyte source class.
config: The config object for the Airbyte source.
"""
def __init__(
self,
source_class: Any,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
from airbyte_cdk.models.airbyte_protocol import AirbyteRecordMessage
from airbyte_cdk.sources.embedded.base_integration import (
BaseEmbeddedIntegration,
)
from airbyte_cdk.sources.embedded.runner import CDKRunner
class CDKIntegration(BaseEmbeddedIntegration):
def _handle_record(
self, record: AirbyteRecordMessage, id: Optional[str]
) -> Document:
if record_handler:
return record_handler(record, id)
return Document(
doc_id=id, text=json.dumps(record.data), extra_info=record.data
)
self._integration = CDKIntegration(
config=config,
runner=CDKRunner(source=source_class(), name=source_class.__name__),
)
def load_data(self, *args: Any, **kwargs: Any) -> List[Document]:
return list(self.lazy_load_data(*args, **kwargs))
def lazy_load_data(self, *args: Any, **kwargs: Any) -> Iterator[Document]:
return self._integration._load_data(*args, **kwargs)
@property
def last_state(self):
return self._integration.last_state
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`Feature` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: list[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`Feature` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ... 'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[list] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if set(translation_dict) == {"language", "translation"}:
return translation_dict
elif self.languages and set(translation_dict) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
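# --- Usage sketch (illustrative only, not part of the original module) ---
# encode_example splits multi-translation entries into tuples and sorts them by language code.
if __name__ == "__main__":
    feat = TranslationVariableLanguages(languages=["en", "fr", "de"])
    encoded = feat.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    print(encoded)
    # -> {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}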
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`Feature` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`Feature` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ... 'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if set(translation_dict) == {"language", "translation"}:
return translation_dict
elif self.languages and set(translation_dict) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
|
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class VideoFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Video
BASE_COLUMN_NAME = "video"
BUILDER_CONFIG_CLASS = VideoFolderConfig
EXTENSIONS: list[str] # definition at the bottom of the script
# TODO: initial list, we should check the compatibility of other formats
VIDEO_EXTENSIONS = [
".mkv",
".mp4",
".avi",
".mpeg",
".mov",
]
VideoFolder.EXTENSIONS = VIDEO_EXTENSIONS
|
from typing import List
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class VideoFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Video
BASE_COLUMN_NAME = "video"
BUILDER_CONFIG_CLASS = VideoFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
# TODO: initial list, we should check the compatibility of other formats
VIDEO_EXTENSIONS = [
".mkv",
".mp4",
".avi",
".mpeg",
".mov",
]
VideoFolder.EXTENSIONS = VIDEO_EXTENSIONS
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for CLIP."""
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import auto_docstring
@auto_docstring
class CLIPImageProcessorFast(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
__all__ = ["CLIPImageProcessorFast"]
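# --- Usage sketch (illustrative only, not part of the original module) ---
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32", use_fast=True)
#   inputs = processor(images=image, return_tensors="pt")  # `image` is any PIL image or array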
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for CLIP."""
from ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import add_start_docstrings
@add_start_docstrings(
"Constructs a fast CLIP image processor.",
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class CLIPImageProcessorFast(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
__all__ = ["CLIPImageProcessorFast"]
|
import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, attr, value)
except AttributeError:
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is None:
if value is None:
return
attr_dict = weakref.WeakValueDictionary()
global_state.set_global_attribute(f"{attr}_dict", attr_dict)
if value is not None:
attr_dict[id(tensor)] = value
elif id(tensor) in attr_dict:
del attr_dict[id(tensor)]
def get_tensor_attr(tensor, attr):
if not hasattr(tensor, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None:
return attr_dict.get(id(tensor), None)
else:
return None
return getattr(tensor, attr, None)
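# --- Usage sketch (illustrative only, not part of the original module) ---
# For tensor types that reject new Python attributes, the value is tracked in a process-wide
# WeakValueDictionary keyed by id(tensor); the names below are placeholders.
#
#   set_tensor_attr(some_tensor, "_keras_mask", mask)  # stored on the object or in the fallback dict
#   get_tensor_attr(some_tensor, "_keras_mask")        # -> mask (or None if never set)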
|
import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, attr, value)
except AttributeError:
if value is None:
return
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is None:
attr_dict = weakref.WeakValueDictionary()
global_state.set_global_attribute(f"{attr}_dict", attr_dict)
attr_dict[id(tensor)] = value
def get_tensor_attr(tensor, attr):
if not hasattr(tensor, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None:
return attr_dict.get(id(tensor), None)
return getattr(tensor, attr, None)
|
from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
@_register_proto(proto_type_name='any_url')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=str(self), type=self._proto_type_name)
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file
path without prefix.
If `scheme` is missing, we assume it is a local file path.
"""
scheme = parts['scheme']
if scheme is None:
# allow missing scheme, unlike pydantic
pass
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
"""
Build a URL from its parts.
The only difference from the pydantic implementation is that we allow
missing `scheme`, making it possible to pass a file path without prefix.
"""
# allow missing scheme, unlike pydantic
scheme_ = scheme if scheme is not None else ''
url = super().build(
scheme=scheme_,
user=user,
password=password,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
**_kwargs,
)
if scheme is None and url.startswith('://'):
# remove the `://` prefix, since scheme is missing
url = url[3:]
return url
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
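# --- Usage sketch (illustrative only, not part of the original module) ---
# Both a remote URL and a bare local path validate, since a missing scheme is allowed.
if __name__ == "__main__":
    print(parse_obj_as(AnyUrl, 'https://docs.docarray.org/logo.png'))
    print(parse_obj_as(AnyUrl, 'local_folder/image.png'))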
|
from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(any_url=str(self))
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file
path without prefix.
If `scheme` is missing, we assume it is a local file path.
"""
scheme = parts['scheme']
if scheme is None:
# allow missing scheme, unlike pydantic
pass
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
"""
Build a URL from its parts.
The only difference from the pydantic implementation is that we allow
missing `scheme`, making it possible to pass a file path without prefix.
"""
# allow missing scheme, unlike pydantic
scheme_ = scheme if scheme is not None else ''
url = super().build(
scheme=scheme_,
user=user,
password=password,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
**_kwargs,
)
if scheme is None and url.startswith('://'):
# remove the `://` prefix, since scheme is missing
url = url[3:]
return url
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
|
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
if header[1] != "path":
raise ValueError(f"expect `header[1]` to be 'path', but got {header[1]}")
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""*CommonVoice* :cite:`ardila2020common` dataset.
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
Tensor:
Waveform
int:
Sample rate
Dict[str, str]:
Dictionary containing the following items from the corresponding TSV file;
* ``"client_id"``
* ``"path"``
* ``"sentence"``
* ``"up_votes"``
* ``"down_votes"``
* ``"age"``
* ``"gender"``
* ``"accent"``
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
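# --- Usage sketch (illustrative only; the dataset root below is a placeholder path) ---
if __name__ == "__main__":
    dataset = COMMONVOICE("path/to/CommonVoice/en", tsv="validated.tsv")
    waveform, sample_rate, metadata = dataset[0]
    print(sample_rate, metadata["sentence"])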
|
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
if header[1] != "path":
raise ValueError(f"expect `header[1]` to be 'path', but got {header[1]}")
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* :cite:`ardila2020common`.
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilroberta-base"
train_batch_size = 128
max_seq_length = 32
num_epochs = 1
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_simcse{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Hugging Face/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_samples = []
with (
gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(filepath, encoding="utf8") as fIn
):
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
logging.info(f"Train sentences: {len(train_samples)}")
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilroberta-base"
train_batch_size = 128
max_seq_length = 32
num_epochs = 1
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_simcse{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Hugging Face/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_samples = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
logging.info(f"Train sentences: {len(train_samples)}")
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
import copy
from typing import Dict, Tuple
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str]:
"""Split a specific key into a key, name pair
    ex: 'my_executor__key' will be split into 'key', 'my_executor'
:param key_name: key name of the param
:return: return the split 'key', 'executor_name' for the key_name
"""
key_split = key_name.split(_SPECIFIC_EXECUTOR_SEPARATOR)
new_key_name = key_split.pop(-1)
executor_name = ''.join(key_split)
return new_key_name, executor_name
def _get_name_from_replicas_name(name: str) -> Tuple[str]:
"""return the original name without the replicas
    ex: 'exec1/rep-0' will be transformed into 'exec1'
:param name: name of the DataRequest
:return: return the original name without the replicas
"""
return name.split('/')[0]
def _is_param_for_specific_executor(key_name: str) -> bool:
"""Tell if a key is for a specific Executor
ex: 'key' is for every Executor whereas 'my_executor__key' is only for 'my_executor'
:param key_name: key name of the param
:return: return True if key_name is for specific Executor, False otherwise
"""
if _SPECIFIC_EXECUTOR_SEPARATOR in key_name:
if key_name.startswith(_SPECIFIC_EXECUTOR_SEPARATOR) or key_name.endswith(
_SPECIFIC_EXECUTOR_SEPARATOR
):
return False
return True
else:
return False
def _parse_specific_params(parameters: Dict, executor_name: str):
"""Parse the parameters dictionary to filter executor specific parameters
    :param parameters: dictionary containing the parameters
:param executor_name: name of the Executor
:returns: the parsed parameters after applying filtering for the specific Executor
"""
parsed_params = copy.deepcopy(parameters)
for key in parameters:
if _is_param_for_specific_executor(key):
(
key_name,
key_executor_name,
) = _spit_key_and_executor_name(key)
if key_executor_name == executor_name:
parsed_params[key_name] = parameters[key]
del parsed_params[key]
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
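# --- Usage sketch (illustrative only, not part of the original module) ---
# 'executor1__key' overrides 'key' for executor1, and the 'executor1' sub-dict is merged in.
if __name__ == "__main__":
    params = {'key': 1, 'executor1__key': 2, 'executor1': {'other': 3}}
    print(_parse_specific_params(params, 'executor1'))
    # -> {'key': 2, 'executor1': {'other': 3}, 'other': 3}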
|
import copy
from typing import Dict, Tuple
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str]:
"""Split a specific key into a key, name pair
    ex: 'my_executor__key' will be split into 'key', 'my_executor'
:param key_name: key name of the param
:return: return the split 'key', 'executor_name' for the key_name
"""
key_split = key_name.split(_SPECIFIC_EXECUTOR_SEPARATOR)
new_key_name = key_split.pop(-1)
executor_name = ''.join(key_split)
return new_key_name, executor_name
def _get_name_from_replicas_name(name: str) -> Tuple[str]:
"""return the original name without the replicas
    ex: 'exec1/rep-0' will be transformed into 'exec1'
:param name: name of the DataRequest
:return: return the original name without the replicas
"""
return name.split('/')[0]
def _is_param_for_specific_executor(key_name: str) -> bool:
"""Tell if a key is for a specific Executor
ex: 'key' is for every Executor whereas 'key__my_executor' is only for 'my_executor'
:param key_name: key name of the param
:return: return True if key_name is for specific Executor, False otherwise
"""
if _SPECIFIC_EXECUTOR_SEPARATOR in key_name:
if key_name.startswith(_SPECIFIC_EXECUTOR_SEPARATOR) or key_name.endswith(
_SPECIFIC_EXECUTOR_SEPARATOR
):
return False
return True
else:
return False
def _parse_specific_params(parameters: Dict, executor_name: str):
"""Parse the parameters dictionary to filter executor specific parameters
    :param parameters: dictionary containing the parameters
:param executor_name: name of the Executor
:returns: the parsed parameters after applying filtering for the specific Executor
"""
parsed_params = copy.deepcopy(parameters)
for key in parameters:
if _is_param_for_specific_executor(key):
(
key_name,
key_executor_name,
) = _spit_key_and_executor_name(key)
if key_executor_name == executor_name:
parsed_params[key_name] = parameters[key]
del parsed_params[key]
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models as rest
from qdrant_client.http.models.models import Distance
if TYPE_CHECKING: # pragma: no cover
import numpy as np
import tensorflow
import torch
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self,
q: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None
if not search_params
else rest.SearchParams(**search_params),
limit=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query, in any array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:param search_params: additional parameters of the search
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [
self._find_similar_vectors(
query, limit=limit, filter=filter, search_params=search_params
)
]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(
q, limit=limit, filter=filter, search_params=search_params
)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=rest.Filter(**filter),
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
        :return: a `DocumentArray` containing the `Document` objects that satisfy the filter.
"""
return self._find_with_filter(filter, limit=limit)
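# --- Usage sketch (illustrative only; the payload key below is a placeholder) ---
# A Qdrant-style filter as consumed by `_find` (pre-filtering) and `_filter` above:
#
#   qdrant_filter = {
#       'must': [
#           {'key': 'city', 'match': {'value': 'Berlin'}},
#       ]
#   }
#   # e.g. self._find(query_embedding, limit=5, filter=qdrant_filter)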
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models as rest
from qdrant_client.http.models.models import Distance
if TYPE_CHECKING: # pragma: no cover
import numpy as np
import tensorflow
import torch
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self,
q: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None
if not search_params
else rest.SearchParams(**search_params),
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query, in any array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:param search_params: additional parameters of the search
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [
self._find_similar_vectors(
query, limit=limit, filter=filter, search_params=search_params
)
]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(
q, limit=limit, filter=filter, search_params=search_params
)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=filter,
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
        :return: a `DocumentArray` containing the `Document` objects that satisfy the filter.
"""
return self._find_with_filter(filter, limit=limit)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, mask2ndarray, multi_apply,
select_single_mlvl, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, mask2ndarray, multi_apply,
select_single_mlvl, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk'
]
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..providers import ProviderName
from ._base import BaseWebhooksManager
_WEBHOOK_MANAGERS: dict["ProviderName", type["BaseWebhooksManager"]] = {}
# --8<-- [start:load_webhook_managers]
def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]:
if _WEBHOOK_MANAGERS:
return _WEBHOOK_MANAGERS
from .compass import CompassWebhookManager
from .generic import GenericWebhooksManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
_WEBHOOK_MANAGERS.update(
{
handler.PROVIDER_NAME: handler
for handler in [
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
GenericWebhooksManager,
]
}
)
return _WEBHOOK_MANAGERS
# --8<-- [end:load_webhook_managers]
def get_webhook_manager(provider_name: "ProviderName") -> "BaseWebhooksManager":
return load_webhook_managers()[provider_name]()
def supports_webhooks(provider_name: "ProviderName") -> bool:
return provider_name in load_webhook_managers()
__all__ = ["get_webhook_manager", "supports_webhooks"]
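# --- Usage sketch (illustrative only; the import path for ProviderName is assumed) ---
#
#   from backend.integrations.providers import ProviderName
#
#   if supports_webhooks(ProviderName.GITHUB):
#       manager = get_webhook_manager(ProviderName.GITHUB)  # lazily loads GithubWebhooksManager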
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..providers import ProviderName
from ._base import BaseWebhooksManager
_WEBHOOK_MANAGERS: dict["ProviderName", type["BaseWebhooksManager"]] = {}
# --8<-- [start:load_webhook_managers]
def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]:
if _WEBHOOK_MANAGERS:
return _WEBHOOK_MANAGERS
from .compass import CompassWebhookManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
_WEBHOOK_MANAGERS.update(
{
handler.PROVIDER_NAME: handler
for handler in [
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
]
}
)
return _WEBHOOK_MANAGERS
# --8<-- [end:load_webhook_managers]
def get_webhook_manager(provider_name: "ProviderName") -> "BaseWebhooksManager":
return load_webhook_managers()[provider_name]()
def supports_webhooks(provider_name: "ProviderName") -> bool:
return provider_name in load_webhook_managers()
__all__ = ["get_webhook_manager", "supports_webhooks"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from typing import Union
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    accordingly. Otherwise, keep cfg.data_root as the default.
    Args:
        cfg (:obj:`Config`): The model config that needs to be modified.
        logger (logging.Logger | str | None): The logger used to print messages.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
                  f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
"""Get the test dataset pipeline from entire config.
Args:
cfg (str or :obj:`ConfigDict`): the entire config. Can be a config
file or a ``ConfigDict``.
Returns:
:obj:`ConfigDict`: the config of test dataset.
"""
if isinstance(cfg, str):
cfg = Config.fromfile(cfg)
def _get_test_pipeline_cfg(dataset_cfg):
if 'pipeline' in dataset_cfg:
return dataset_cfg.pipeline
# handle dataset wrapper
elif 'dataset' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.dataset)
# handle dataset wrappers like ConcatDataset
elif 'datasets' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.datasets[0])
raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)
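# Hedged usage sketch (illustration only, not part of the original module):
# resolve the newest checkpoint in a work directory and read the test pipeline
# from a config file; both paths are placeholders.
def _example_latest_ckpt_and_pipeline(work_dir: str = 'work_dirs/faster_rcnn',
                                      config_path: str = 'configs/example.py'):
    ckpt = find_latest_checkpoint(work_dir)        # None if nothing has been saved yet
    pipeline = get_test_pipeline_cfg(config_path)  # walks cfg.test_dataloader.dataset
    return ckpt, pipeline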
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    accordingly. Otherwise, keep cfg.data_root as the default.
    Args:
        cfg (:obj:`Config`): The model config that needs to be modified.
        logger (logging.Logger | str | None): The logger used to print messages.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
                  f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'Resize', 'RandomFlip', 'RandomCrop', 'Normalize',
'SegRescale', 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion',
'Albu', 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut',
'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize', 'Brightness',
'Contrast', 'TranslateX', 'TranslateY', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations',
'Pad', 'GeomTransform', 'ColorTransform', 'RandAugment', 'Sharpness',
'Solarize', 'SolarizeAdd', 'Posterize', 'AutoContrast', 'Invert',
'MultiBranch', 'RandomErasing', 'LoadEmptyAnnotations', 'RandomOrder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'Resize', 'RandomFlip', 'RandomCrop', 'Normalize',
'SegRescale', 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion',
'Albu', 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut',
'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize', 'Brightness',
'Contrast', 'TranslateX', 'TranslateY', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations',
'Pad', 'GeomTransform', 'ColorTransform', 'RandAugment', 'Sharpness',
'Solarize', 'SolarizeAdd', 'Posterize', 'AutoContrast', 'Invert',
'MultiBranch', 'RandomErasing', 'LoadEmptyAnnotations'
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='VFNetHead',
num_classes=80,
in_channels=256,
stacked_convs=3,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
center_sampling=False,
dcn_on_last_conv=False,
use_atss=True,
use_vfl=True,
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# data setting
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=None)
# learning rate
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='VFNetHead',
num_classes=80,
in_channels=256,
stacked_convs=3,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
center_sampling=False,
dcn_on_last_conv=False,
use_atss=True,
use_vfl=True,
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# data setting
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
def check_matplotlib_support(caller_name):
"""Raise ImportError with detailed error message if mpl is not installed.
Plot utilities like any of the Display's plotting functions should lazily import
matplotlib and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires matplotlib.
"""
try:
import matplotlib # noqa: F401
except ImportError as e:
raise ImportError(
"{} requires matplotlib. You can install matplotlib with "
"`pip install matplotlib`".format(caller_name)
) from e
def check_pandas_support(caller_name):
"""Raise ImportError with detailed error message if pandas is not installed.
Plot utilities like :func:`fetch_openml` should lazily import
pandas and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires pandas.
Returns
-------
pandas
The pandas package.
"""
try:
import pandas
return pandas
except ImportError as e:
raise ImportError("{} requires pandas.".format(caller_name)) from e
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
def check_matplotlib_support(caller_name):
"""Raise ImportError with detailed error message if mpl is not installed.
Plot utilities like any of the Display's plotting functions should lazily import
matplotlib and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires matplotlib.
"""
try:
import matplotlib # noqa
except ImportError as e:
raise ImportError(
"{} requires matplotlib. You can install matplotlib with "
"`pip install matplotlib`".format(caller_name)
) from e
def check_pandas_support(caller_name):
"""Raise ImportError with detailed error message if pandas is not installed.
Plot utilities like :func:`fetch_openml` should lazily import
pandas and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires pandas.
Returns
-------
pandas
The pandas package.
"""
try:
import pandas
return pandas
except ImportError as e:
raise ImportError("{} requires pandas.".format(caller_name)) from e
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../common/lsj-200e_coco-detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../common/lsj_200e_coco_detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from __future__ import annotations
import json
import logging
from typing import Any, Dict, List, Literal, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import Field, SecretStr
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class YiLLM(LLM):
"""Yi large language models."""
model: str = "yi-large"
temperature: float = 0.3
top_p: float = 0.95
timeout: int = 60
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
yi_api_key: Optional[SecretStr] = None
region: Literal["auto", "domestic", "international"] = "auto"
yi_api_url_domestic: str = "https://api.lingyiwanwu.com/v1/chat/completions"
yi_api_url_international: str = "https://api.01.ai/v1/chat/completions"
def __init__(self, **kwargs: Any):
kwargs["yi_api_key"] = convert_to_secret_str(
get_from_dict_or_env(kwargs, "yi_api_key", "YI_API_KEY")
)
super().__init__(**kwargs)
@property
def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
def _post(self, request: Any) -> Any:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.yi_api_key.get_secret_value()}", # type: ignore[union-attr]
}
urls = []
if self.region == "domestic":
urls = [self.yi_api_url_domestic]
elif self.region == "international":
urls = [self.yi_api_url_international]
else: # auto
urls = [self.yi_api_url_domestic, self.yi_api_url_international]
for url in urls:
try:
response = requests.post(
url,
headers=headers,
json=request,
timeout=self.timeout,
)
if response.status_code == 200:
parsed_json = json.loads(response.text)
return parsed_json["choices"][0]["message"]["content"]
elif (
response.status_code != 403
): # If not a permission error, raise immediately
response.raise_for_status()
except requests.RequestException as e:
if url == urls[-1]: # If this is the last URL to try
raise ValueError(f"An error has occurred: {e}")
else:
logger.warning(f"Failed to connect to {url}, trying next URL")
continue
raise ValueError("Failed to connect to all available URLs")
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = self._default_params
request["messages"] = [{"role": "user", "content": prompt}]
request.update(kwargs)
text = self._post(request)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "yi-llm"
|
from __future__ import annotations
import json
import logging
from typing import Any, Dict, List, Literal, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import Field, SecretStr
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class YiLLM(LLM):
"""Yi large language models."""
model: str = "yi-large"
temperature: float = 0.3
top_p: float = 0.95
timeout: int = 60
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
yi_api_key: Optional[SecretStr] = None
region: Literal["auto", "domestic", "international"] = "auto"
yi_api_url_domestic: str = "https://api.lingyiwanwu.com/v1/chat/completions"
yi_api_url_international: str = "https://api.01.ai/v1/chat/completions"
def __init__(self, **kwargs: Any):
kwargs["yi_api_key"] = convert_to_secret_str(
get_from_dict_or_env(kwargs, "yi_api_key", "YI_API_KEY")
)
super().__init__(**kwargs)
@property
def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
def _post(self, request: Any) -> Any:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.yi_api_key.get_secret_value()}", # type: ignore
}
urls = []
if self.region == "domestic":
urls = [self.yi_api_url_domestic]
elif self.region == "international":
urls = [self.yi_api_url_international]
else: # auto
urls = [self.yi_api_url_domestic, self.yi_api_url_international]
for url in urls:
try:
response = requests.post(
url,
headers=headers,
json=request,
timeout=self.timeout,
)
if response.status_code == 200:
parsed_json = json.loads(response.text)
return parsed_json["choices"][0]["message"]["content"]
elif (
response.status_code != 403
): # If not a permission error, raise immediately
response.raise_for_status()
except requests.RequestException as e:
if url == urls[-1]: # If this is the last URL to try
raise ValueError(f"An error has occurred: {e}")
else:
logger.warning(f"Failed to connect to {url}, trying next URL")
continue
raise ValueError("Failed to connect to all available URLs")
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = self._default_params
request["messages"] = [{"role": "user", "content": prompt}]
request.update(kwargs)
text = self._post(request)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "yi-llm"
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Optional, Tuple
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
    Encode an array of strings of size `B` into an ndarray of size `B x D`.
    The ndarray is potentially BatchSize x (Channel x Height x Width).
    Internally, :class:`TextPaddleEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
on_gpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
        :param on_gpu: Whether to use the GPU to compute the output.
        :param default_batch_size: fallback batch size in case no batch size is sent in the request
        :param default_traversal_paths: fallback traversal paths in case no traversal paths are sent in the request
"""
super().__init__(*args, **kwargs)
self.on_gpu = on_gpu
self.model = hub.Module(name=model_name)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""Encode doc content into vector representation.
:param docs: `DocumentArray` passed from the previous ``Executor``.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(contents, use_gpu=self.on_gpu)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
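# Hedged usage sketch (illustration only): instantiate the executor directly and
# call encode() on a small DocumentArray; downloading the paddlehub model is
# assumed to succeed in the target environment.
def _example_encode_texts():
    from jina import Document
    docs = DocumentArray([Document(text='hello'), Document(text='world')])
    encoder = TextPaddleEncoder(model_name='ernie_tiny', on_gpu=False)
    encoder.encode(docs, parameters={})
    return [doc.embedding.shape for doc in docs]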
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Optional, Tuple
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
    Encode an array of strings of size `B` into an ndarray of size `B x D`.
    The ndarray is potentially BatchSize x (Channel x Height x Width).
    Internally, :class:`TextPaddleEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
    :param on_gpu: Whether to use the GPU to compute the output.
    :param default_batch_size: fallback batch size in case no batch size is sent in the request
    :param default_traversal_paths: fallback traversal paths in case no traversal paths are sent in the request
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
on_gpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.on_gpu = on_gpu
self.model = hub.Module(name=model_name)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""Encode doc content into vector representation.
:param docs: `DocumentArray` passed from the previous ``Executor``.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(contents, use_gpu=self.on_gpu)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
"AudioBlock",
]
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
]
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.audio import AudioTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDoc):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_audio_tensorflow_tensor():
class MyAudioDoc(BaseDoc):
tensor: AudioTensorFlowTensor
doc = MyAudioDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
assert isinstance(arr, AudioTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.proto
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._proto_type_name),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
proto = tensor._to_node_protobuf()
assert AudioTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_audio_tensorflow_tensor_to_wav_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.zeros(1000, 2)),
parse_obj_as(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_bytes(audio_tensor):
b = audio_tensor.to_bytes()
    assert isinstance(b, bytes)
    assert isinstance(b, AudioBytes)
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.audio import AudioTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDoc):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_audio_tensorflow_tensor():
class MyAudioDoc(BaseDoc):
tensor: AudioTensorFlowTensor
doc = MyAudioDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
assert isinstance(arr, AudioTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.proto
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._proto_type_name),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
proto = tensor._to_node_protobuf()
assert AudioTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_audio_tensorflow_tensor_to_wav_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
|
from docarray import BaseDoc, DocList
def test_instance_and_equivalence():
class MyDoc(BaseDoc):
text: str
docs = DocList[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocList[MyDoc], DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, DocList[MyDoc])
def test_subclassing():
class MyDoc(BaseDoc):
text: str
class MyDocList(DocList[MyDoc]):
pass
docs = MyDocList([MyDoc(text='hello')])
assert issubclass(MyDocList, DocList[MyDoc])
assert issubclass(docs.__class__, DocList[MyDoc])
assert isinstance(docs, MyDocList)
assert isinstance(docs, DocList[MyDoc])
assert issubclass(MyDoc, BaseDoc)
assert not issubclass(DocList[MyDoc], DocList[BaseDoc])
assert not issubclass(MyDocList, DocList[BaseDoc])
|
from docarray import BaseDoc, DocArray
def test_instance_and_equivalence():
class MyDoc(BaseDoc):
text: str
docs = DocArray[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocArray[MyDoc], DocArray[MyDoc])
assert issubclass(docs.__class__, DocArray[MyDoc])
assert isinstance(docs, DocArray[MyDoc])
def test_subclassing():
class MyDoc(BaseDoc):
text: str
class MyDocArray(DocArray[MyDoc]):
pass
docs = MyDocArray([MyDoc(text='hello')])
assert issubclass(MyDocArray, DocArray[MyDoc])
assert issubclass(docs.__class__, DocArray[MyDoc])
assert isinstance(docs, MyDocArray)
assert isinstance(docs, DocArray[MyDoc])
assert issubclass(MyDoc, BaseDoc)
assert not issubclass(DocArray[MyDoc], DocArray[BaseDoc])
assert not issubclass(MyDocArray, DocArray[BaseDoc])
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
def squared_l2_norm(x):
x = backend.convert_to_numpy(x)
return np.sum(x**2)
class UnitNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_un_basics(self):
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": -1},
input_shape=(2, 3),
expected_output_shape=(2, 3),
supports_masking=True,
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": (1, 2)},
input_shape=(1, 3, 3),
expected_output_shape=(1, 3, 3),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Invalid value for `axis` argument: expected an int or a "
"list/tuple of ints."
),
):
layers.UnitNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.UnitNormalization(axis=-1)
inputs = np.random.normal(size=(2, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)
layer = layers.UnitNormalization(axis=(1, 2))
inputs = np.random.normal(size=(2, 3, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)
layer = layers.UnitNormalization(axis=1)
inputs = np.random.normal(size=(2, 3, 2))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
def squared_l2_norm(x):
x = backend.convert_to_numpy(x)
return np.sum(x**2)
class UnitNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_un_basics(self):
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": -1},
input_shape=(2, 3),
expected_output_shape=(2, 3),
supports_masking=True,
)
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": (1, 2)},
input_shape=(1, 3, 3),
expected_output_shape=(1, 3, 3),
supports_masking=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Invalid value for `axis` argument: expected an int or a "
"list/tuple of ints."
),
):
layers.UnitNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.UnitNormalization(axis=-1)
inputs = np.random.normal(size=(2, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)
layer = layers.UnitNormalization(axis=(1, 2))
inputs = np.random.normal(size=(2, 3, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)
layer = layers.UnitNormalization(axis=1)
inputs = np.random.normal(size=(2, 3, 2))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
|
import warnings
from abc import ABC
from typing import Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
:param file_path: path to an audio file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
from pydub import AudioSegment # type: ignore
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
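# Hedged usage sketch (illustration only): write a synthetic one-second 440 Hz
# tone to a .wav file via a concrete audio tensor type (AudioNdArray); saving
# relies on pydub being installed, as in save() above.
def _example_save_tone(path: str = 'tone.wav', frame_rate: int = 44100):
    import numpy as np
    from pydantic import parse_obj_as
    from docarray.typing import AudioNdArray
    t = np.linspace(0, 1, frame_rate, endpoint=False)
    tone = parse_obj_as(AudioNdArray, 0.3 * np.sin(2 * np.pi * 440 * t))
    tone.save(path, format='wav', frame_rate=frame_rate)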
|
import warnings
import wave
from abc import ABC
from typing import BinaryIO, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save_to_wav_file(
self: 'T',
file_path: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> None:
"""
Save audio tensor to a .wav file. Mono/stereo is preserved.
:param file_path: path to a .wav file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
"""
comp_backend = self.get_comp_backend()
n_channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
with wave.open(file_path, 'w') as f:
f.setnchannels(n_channels)
f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(self.to_bytes())
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from pytest_httpx import HTTPXMock
from requests_mock import Mocker
from contextlib import contextmanager
import os
from typing import Generator, Any
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
def test_create_without_base_url(public_class: type, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
monkeypatch.delenv("NVIDIA_BASE_URL", raising=False)
x = public_class()
assert x.base_url == "https://integrate.api.nvidia.com/v1"
assert str(x._client.base_url) == "https://integrate.api.nvidia.com/v1/"
# https.Url
def test_base_url_priority(public_class: type, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
ENV_URL = "https://ENV/v1"
NV_PARAM_URL = "https://NV_PARAM/v1"
PARAM_URL = "https://PARAM/v1"
def get_base_url(**kwargs: Any) -> str:
return public_class(model="NV-Embed-QA", **kwargs).base_url
with no_env_var("NVIDIA_BASE_URL"):
os.environ["NVIDIA_BASE_URL"] = ENV_URL
assert get_base_url() == ENV_URL
assert get_base_url(base_url=NV_PARAM_URL) == NV_PARAM_URL
assert get_base_url(base_url=PARAM_URL) == PARAM_URL
# marking as skip because base_url validation is removed
@pytest.mark.skip(reason="base_url validation is removed")
@pytest.mark.parametrize(
"base_url",
[
"bogus",
"http:/",
"http://",
"http:/oops",
],
)
def test_param_base_url_negative(
public_class: type, base_url: str, monkeypatch
) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
monkeypatch.delenv("NVIDIA_BASE_URL", raising=False)
with pytest.raises(ValueError) as e:
public_class(model="model1", base_url=base_url)
assert "Invalid base_url" in str(e.value)
# marking as skip because base_url validation is removed
@pytest.mark.skip(reason="base_url validation is removed")
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8888/embeddings",
"http://0.0.0.0:8888/rankings",
"http://localhost:8888/embeddings/",
"http://0.0.0.0:8888/rankings/",
"http://localhost:8888/chat/completions",
"http://localhost:8080/v1/embeddings",
"http://0.0.0.0:8888/v1/rankings",
],
)
def test_expect_warn(public_class: type, base_url: str) -> None:
with pytest.warns(UserWarning) as record:
public_class(model="model1", base_url=base_url)
assert len(record) == 1
assert "does not end in /v1" in str(record[0].message)
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8080/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_local_models: None) -> None:
with pytest.warns(UserWarning):
cls = Interface(base_url=base_url)
assert cls._is_hosted is False
assert cls.model == "model1"
@contextmanager
def no_env_var(var: str) -> Generator[None, None, None]:
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield
finally:
if val:
os.environ[var] = val
else:
if var in os.environ:
del os.environ[var]
@pytest.mark.parametrize(
"base_url",
[
"http://host/path0/path1/path2/v1",
"http://host:123/path0/path1/path2/v1/",
],
)
def test_proxy_base_url(
public_class: type, base_url: str, requests_mock: Mocker
) -> None:
with no_env_var("NVIDIA_BASE_URL"):
client = public_class(
api_key="NO_API_KEY_PROVIDED", model="NV-Embed-QA", base_url=base_url
)
assert base_url.startswith(client.base_url)
|
import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from pytest_httpx import HTTPXMock
from requests_mock import Mocker
from contextlib import contextmanager
import os
from typing import Generator, Any
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
def test_create_without_base_url(public_class: type, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
monkeypatch.delenv("NVIDIA_BASE_URL", raising=False)
x = public_class()
assert x.base_url == "https://integrate.api.nvidia.com/v1"
assert str(x._client.base_url) == "https://integrate.api.nvidia.com/v1/"
# https.Url
def test_base_url_priority(public_class: type, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
ENV_URL = "https://ENV/v1"
NV_PARAM_URL = "https://NV_PARAM/v1"
PARAM_URL = "https://PARAM/v1"
def get_base_url(**kwargs: Any) -> str:
return public_class(model="NV-Embed-QA", **kwargs).base_url
with no_env_var("NVIDIA_BASE_URL"):
os.environ["NVIDIA_BASE_URL"] = ENV_URL
assert get_base_url() == ENV_URL
assert get_base_url(base_url=NV_PARAM_URL) == NV_PARAM_URL
assert get_base_url(base_url=PARAM_URL) == PARAM_URL
@pytest.mark.parametrize(
"base_url",
[
"bogus",
"http:/",
"http://",
"http:/oops",
],
)
def test_param_base_url_negative(
public_class: type, base_url: str, monkeypatch
) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
monkeypatch.delenv("NVIDIA_BASE_URL", raising=False)
with pytest.raises(ValueError) as e:
public_class(model="model1", base_url=base_url)
assert "Invalid base_url" in str(e.value)
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8888/embeddings",
"http://0.0.0.0:8888/rankings",
"http://localhost:8888/embeddings/",
"http://0.0.0.0:8888/rankings/",
"http://localhost:8888/chat/completions",
"http://localhost:8080/v1/embeddings",
"http://0.0.0.0:8888/v1/rankings",
],
)
def test_expect_warn(public_class: type, base_url: str) -> None:
with pytest.warns(UserWarning) as record:
public_class(model="model1", base_url=base_url)
assert len(record) == 1
assert "does not end in /v1" in str(record[0].message)
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8080/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_local_models: None) -> None:
with pytest.warns(UserWarning):
cls = Interface(base_url=base_url)
assert cls._is_hosted is False
assert cls.model == "model1"
@contextmanager
def no_env_var(var: str) -> Generator[None, None, None]:
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield
finally:
if val:
os.environ[var] = val
else:
if var in os.environ:
del os.environ[var]
@pytest.mark.parametrize(
"base_url",
[
"http://host/path0/path1/path2/v1",
"http://host:123/path0/path1/path2/v1/",
],
)
def test_proxy_base_url(
public_class: type, base_url: str, requests_mock: Mocker
) -> None:
with no_env_var("NVIDIA_BASE_URL"):
client = public_class(
api_key="NO_API_KEY_PROVIDED", model="NV-Embed-QA", base_url=base_url
)
assert base_url.startswith(client.base_url)
|
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether to enable OpenMP
use_openmp: bool = True
# Whether to enable CUDA
use_cuda: bool = False
# Whether to enable NCCL
use_nccl: bool = False
# Whether to load nccl dynamically
use_dlopen_nccl: bool = False
# Whether to enable federated learning
plugin_federated: bool = False
# Whether to enable rmm support
plugin_rmm: bool = False
# Special option: See explanation below
use_system_libxgboost: bool = False
def _set_config_setting(self, config_settings: Dict[str, Any]) -> None:
for field_name in config_settings:
setattr(
self,
field_name,
(config_settings[field_name].lower() in ["true", "1", "on"]),
)
def update(self, config_settings: Optional[Dict[str, Any]]) -> None:
"""Parse config_settings from Pip (or other PEP 517 frontend)"""
if config_settings is not None:
self._set_config_setting(config_settings)
def get_cmake_args(self) -> List[str]:
"""Convert build configuration to CMake args"""
cmake_args = []
for field_name in [x.name for x in dataclasses.fields(self)]:
if field_name in ["use_system_libxgboost"]:
continue
cmake_option = field_name.upper()
cmake_value = "ON" if getattr(self, field_name) is True else "OFF"
cmake_args.append(f"-D{cmake_option}={cmake_value}")
return cmake_args
|
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether to enable OpenMP
use_openmp: bool = True
# Whether to enable CUDA
use_cuda: bool = False
# Whether to enable NCCL
use_nccl: bool = False
# Whether to load nccl dynamically
use_dlopen_nccl: bool = False
# Whether to enable HDFS
use_hdfs: bool = False
# Whether to enable Azure Storage
use_azure: bool = False
# Whether to enable AWS S3
use_s3: bool = False
# Whether to enable the dense parser plugin
plugin_dense_parser: bool = False
# Special option: See explanation below
use_system_libxgboost: bool = False
def _set_config_setting(self, config_settings: Dict[str, Any]) -> None:
for field_name in config_settings:
setattr(
self,
field_name,
(config_settings[field_name].lower() in ["true", "1", "on"]),
)
def update(self, config_settings: Optional[Dict[str, Any]]) -> None:
"""Parse config_settings from Pip (or other PEP 517 frontend)"""
if config_settings is not None:
self._set_config_setting(config_settings)
def get_cmake_args(self) -> List[str]:
"""Convert build configuration to CMake args"""
cmake_args = []
for field_name in [x.name for x in dataclasses.fields(self)]:
if field_name in ["use_system_libxgboost"]:
continue
cmake_option = field_name.upper()
cmake_value = "ON" if getattr(self, field_name) is True else "OFF"
cmake_args.append(f"-D{cmake_option}={cmake_value}")
return cmake_args
|
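A minimal usage sketch for the BuildConfiguration dataclass above (the mechanism is identical in both variants; only the option fields differ): PEP 517 config settings arrive from the build frontend as strings, are coerced onto the boolean fields, and get_cmake_args() then renders them as -D flags. The values below are illustrative only.
config = BuildConfiguration()
config.update({"use_cuda": "True", "use_nccl": "1"})  # strings as passed via --config-settings
print(config.get_cmake_args())
# e.g. ['-DHIDE_CXX_SYMBOLS=ON', '-DUSE_OPENMP=ON', '-DUSE_CUDA=ON', '-DUSE_NCCL=ON', ...]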
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
client_kwargs={"trust_env": True}, # Enable reading proxy env variables.
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return sorted(f["name"] for f in out)
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return sorted(f["name"] for f in out)
|
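Since the docstring above describes building this legacy filesystem from a huggingface_hub DatasetInfo object, here is a short, hedged usage sketch; the repository name is an arbitrary public dataset chosen for illustration, network access is assumed, and none of this is part of the module itself.
from huggingface_hub import HfApi
repo_info = HfApi().dataset_info("squad")  # any public dataset repo with siblings listed
fs = HfFileSystem(repo_info=repo_info)     # class defined above
entries = fs.ls("")                        # sorted top-level file and directory names
print(entries)
print(fs.info(entries[0]))                 # per-entry metadata from the directory cache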
import subprocess
import sys
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore
@pytest.mark.parametrize(
"import_path",
[
pytest.param(
"from langchain_core.messages import HumanMessage", id="HumanMessage"
),
pytest.param("from langchain_core.tools import tool", id="tool"),
pytest.param(
"from langchain_core.callbacks import CallbackManager", id="CallbackManager"
),
pytest.param("from langchain_core.runnables import Runnable", id="Runnable"),
pytest.param(
"from langchain_core.language_models import BaseChatModel",
id="BaseChatModel",
),
pytest.param(
"from langchain_core.prompts import ChatPromptTemplate",
id="ChatPromptTemplate",
),
pytest.param("from langchain_core.documents import Document", id="Document"),
pytest.param(
"from langchain_core.vectorstores import InMemoryVectorStore",
id="InMemoryVectorStore",
),
pytest.param(
"from langchain_core.runnables import RunnableLambda",
id="RunnableLambda",
),
pytest.param(
"from langchain_core.tracers import LangChainTracer",
id="LangChainTracer",
),
pytest.param(
"from langchain_core.output_parsers import PydanticOutputParser",
id="PydanticOutputParser",
),
pytest.param(
"from langchain_core.rate_limiters import InMemoryRateLimiter",
id="InMemoryRateLimiter",
),
],
)
@pytest.mark.benchmark
def test_import_time(benchmark: BenchmarkFixture, import_path: str) -> None:
@benchmark
def import_in_subprocess() -> None:
subprocess.run([sys.executable, "-c", import_path], check=False)
|
import subprocess
import sys
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore
@pytest.mark.parametrize(
"import_path",
[
pytest.param(
"from langchain_core.messages import HumanMessage", id="HumanMessage"
),
pytest.param("from langchain_core.tools import tool", id="tool"),
pytest.param(
"from langchain_core.callbacks import CallbackManager", id="CallbackManager"
),
pytest.param("from langchain_core.runnables import Runnable", id="Runnable"),
pytest.param(
"from langchain_core.language_models import BaseChatModel",
id="BaseChatModel",
),
pytest.param(
"from langchain_core.prompts import ChatPromptTemplate",
id="PromChatPromptTemplateptTemplate",
),
pytest.param("from langchain_core.documents import Document", id="Document"),
pytest.param(
"from langchain_core.vectorstores import InMemoryVectorStore",
id="InMemoryVectorStore",
),
],
)
@pytest.mark.benchmark
def test_import_time(benchmark: BenchmarkFixture, import_path: str) -> None:
@benchmark
def import_in_subprocess() -> None:
subprocess.run([sys.executable, "-c", import_path], check=False)
|
"""Decision tree based models for classification and regression."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._classes import (
BaseDecisionTree,
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from ._export import export_graphviz, export_text, plot_tree
__all__ = [
"BaseDecisionTree",
"DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor",
"export_graphviz",
"export_text",
"plot_tree",
]
|
"""Decision tree based models for classification and regression."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._classes import (
BaseDecisionTree,
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from ._export import export_graphviz, export_text, plot_tree
__all__ = [
"BaseDecisionTree",
"DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor",
"export_graphviz",
"plot_tree",
"export_text",
]
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
            # Different from the usual example - because embeddings suck (manually verified
            # using the laserembeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
            # Different from the usual example - because embeddings suck (manually verified
            # using the laserembeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'Mask2Former'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer'
]
|
from typing import Annotated, Any, Literal, Optional, TypedDict
from uuid import uuid4
from pydantic import BaseModel, Field, SecretStr, field_serializer
class _BaseCredentials(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
provider: str
title: Optional[str]
@field_serializer("*")
def dump_secret_strings(value: Any, _info):
if isinstance(value, SecretStr):
return value.get_secret_value()
return value
class OAuth2Credentials(_BaseCredentials):
type: Literal["oauth2"] = "oauth2"
username: Optional[str]
"""Username of the third-party service user that these credentials belong to"""
access_token: SecretStr
access_token_expires_at: Optional[int]
"""Unix timestamp (seconds) indicating when the access token expires (if at all)"""
refresh_token: Optional[SecretStr]
refresh_token_expires_at: Optional[int]
"""Unix timestamp (seconds) indicating when the refresh token expires (if at all)"""
scopes: list[str]
metadata: dict[str, Any] = Field(default_factory=dict)
def bearer(self) -> str:
return f"Bearer {self.access_token.get_secret_value()}"
class APIKeyCredentials(_BaseCredentials):
type: Literal["api_key"] = "api_key"
api_key: SecretStr
expires_at: Optional[int]
"""Unix timestamp (seconds) indicating when the API key expires (if at all)"""
def bearer(self) -> str:
return f"Bearer {self.api_key.get_secret_value()}"
Credentials = Annotated[
OAuth2Credentials | APIKeyCredentials,
Field(discriminator="type"),
]
CredentialsType = Literal["api_key", "oauth2"]
class OAuthState(BaseModel):
token: str
provider: str
expires_at: int
scopes: list[str]
"""Unix timestamp (seconds) indicating when this OAuth state expires"""
class UserMetadata(BaseModel):
integration_credentials: list[Credentials] = Field(default_factory=list)
integration_oauth_states: list[OAuthState] = Field(default_factory=list)
class UserMetadataRaw(TypedDict, total=False):
integration_credentials: list[dict]
integration_oauth_states: list[dict]
class UserIntegrations(BaseModel):
credentials: list[Credentials] = Field(default_factory=list)
oauth_states: list[OAuthState] = Field(default_factory=list)
|
from typing import Annotated, Any, Literal, Optional, TypedDict
from uuid import uuid4
from pydantic import BaseModel, Field, SecretStr, field_serializer
class _BaseCredentials(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
provider: str
title: Optional[str]
@field_serializer("*")
def dump_secret_strings(value: Any, _info):
if isinstance(value, SecretStr):
return value.get_secret_value()
return value
class OAuth2Credentials(_BaseCredentials):
type: Literal["oauth2"] = "oauth2"
username: Optional[str]
"""Username of the third-party service user that these credentials belong to"""
access_token: SecretStr
access_token_expires_at: Optional[int]
"""Unix timestamp (seconds) indicating when the access token expires (if at all)"""
refresh_token: Optional[SecretStr]
refresh_token_expires_at: Optional[int]
"""Unix timestamp (seconds) indicating when the refresh token expires (if at all)"""
scopes: list[str]
metadata: dict[str, Any] = Field(default_factory=dict)
def bearer(self) -> str:
return f"Bearer {self.access_token.get_secret_value()}"
class APIKeyCredentials(_BaseCredentials):
type: Literal["api_key"] = "api_key"
api_key: SecretStr
expires_at: Optional[int]
"""Unix timestamp (seconds) indicating when the API key expires (if at all)"""
def bearer(self) -> str:
return f"Bearer {self.api_key.get_secret_value()}"
Credentials = Annotated[
OAuth2Credentials | APIKeyCredentials,
Field(discriminator="type"),
]
CredentialsType = Literal["api_key", "oauth2"]
class OAuthState(BaseModel):
token: str
provider: str
expires_at: int
scopes: list[str]
"""Unix timestamp (seconds) indicating when this OAuth state expires"""
class UserMetadata(BaseModel):
integration_credentials: list[Credentials] = Field(default_factory=list)
integration_oauth_states: list[OAuthState] = Field(default_factory=list)
class UserMetadataRaw(BaseModel):
integration_credentials: list[dict] = Field(default_factory=list)
integration_oauth_states: list[dict] = Field(default_factory=list)
|
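As a brief illustration of the credential models above (assumed usage, not code from the module): constructing an APIKeyCredentials and dumping it shows the effect of the dump_secret_strings field serializer, which unwraps SecretStr values into plain strings.
creds = APIKeyCredentials(
    provider="example_provider",  # hypothetical provider id
    title="Example key",
    api_key=SecretStr("sk-123"),
    expires_at=None,
)
print(creds.bearer())                 # -> "Bearer sk-123"
print(creds.model_dump()["api_key"])  # -> "sk-123", unwrapped by the serializer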
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine.data import LabelData
class TestLabelData(TestCase):
def test_label_to_onehot(self):
item = torch.tensor([1], dtype=torch.int64)
num_classes = 10
onehot = LabelData.label_to_onehot(label=item, num_classes=num_classes)
assert tuple(onehot.shape) == (num_classes, )
assert onehot.device == item.device
# item is not onehot
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(label='item', num_classes=num_classes)
        # item's max is bigger than num_classes
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(
torch.tensor([11], dtype=torch.int64), num_classes)
onehot = LabelData.label_to_onehot(
label=torch.tensor([], dtype=torch.int64), num_classes=num_classes)
assert (onehot == torch.zeros((num_classes, ),
dtype=torch.int64)).all()
def test_onehot_to_label(self):
# item is not onehot
with self.assertRaisesRegex(
ValueError,
'input is not one-hot and can not convert to label'):
LabelData.onehot_to_label(
onehot=torch.tensor([2], dtype=torch.int64))
with self.assertRaises(AssertionError):
LabelData.onehot_to_label(onehot='item')
item = torch.arange(0, 9)
onehot = LabelData.label_to_onehot(item, num_classes=10)
label = LabelData.onehot_to_label(onehot)
assert (label == item).all()
assert label.device == item.device
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='GPU is required!')
def test_cuda(self):
item = torch.arange(0, 9).cuda()
onehot = LabelData.label_to_onehot(item, num_classes=10)
assert item.device == onehot.device
label = LabelData.onehot_to_label(onehot)
assert label.device == onehot.device
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import LabelData
class TestLabelData(TestCase):
def test_label_to_onehot(self):
item = torch.tensor([1], dtype=torch.int64)
num_classes = 10
onehot = LabelData.label_to_onehot(label=item, num_classes=num_classes)
assert tuple(onehot.shape) == (num_classes, )
# item is not onehot
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(label='item', num_classes=num_classes)
        # item's max is bigger than num_classes
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(
torch.tensor([11], dtype=torch.int64), num_classes)
def test_onehot_to_label(self):
# item is not onehot
with self.assertRaisesRegex(
ValueError,
'input is not one-hot and can not convert to label'):
LabelData.onehot_to_label(
onehot=torch.tensor([2], dtype=torch.int64))
with self.assertRaises(AssertionError):
LabelData.onehot_to_label(onehot='item')
item = torch.arange(0, 9)
onehot = LabelData.label_to_onehot(item, num_classes=10)
label = LabelData.onehot_to_label(onehot)
assert (label == item).all()
|
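For readers unfamiliar with the helpers exercised in these tests, a small sketch of the round trip they assert, assuming an mmengine version that still exposes mmengine.data.LabelData:
import torch
from mmengine.data import LabelData
label = torch.tensor([1, 4], dtype=torch.int64)
onehot = LabelData.label_to_onehot(label=label, num_classes=10)  # multi-hot vector of length 10
assert tuple(onehot.shape) == (10,)
assert (LabelData.onehot_to_label(onehot) == label).all()        # indices recovered from the one-hot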
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
(1333, 608), (1333, 640), (1333, 672), (1333, 704),
(1333, 736), (1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='ConcatDataset',
datasets=[
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
]))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 18k
max_iter = 18000
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=3000)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=100),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[12000, 16000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=3000))
log_processor = dict(by_epoch=False)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
(1333, 608), (1333, 640), (1333, 672), (1333, 704),
(1333, 736), (1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=[
data_root + 'VOC2007/ImageSets/Main/trainval.txt',
data_root + 'VOC2012/ImageSets/Main/trainval.txt'
],
img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
img_prefix=data_root + 'VOC2007/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
img_prefix=data_root + 'VOC2007/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=0.001,
step=[12000, 16000])
# Runner type
runner = dict(type='IterBasedRunner', max_iters=18000)
checkpoint_config = dict(interval=3000)
evaluation = dict(interval=3000, metric='mAP')
|
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend(
[doc.id for doc in docs if doc.id not in self._offset2ids.ids]
)
|
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
|
class DataAdapter:
"""Base class for input data adapters.
    The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields NumPy
arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_tf_dataset(self):
"""Get a `tf.data.Dataset` instance for the DataAdapter.
Note that the dataset returned does not repeat for epoch, so caller
might need to create new iterator for the same dataset at the beginning
of the epoch. This behavior might change in the future.
Returns:
A `tf.data.Dataset`. Caller might use the dataset in different
context, e.g. iter(dataset) in eager to get the value directly, or
in graph mode, provide the iterator tensor to Keras model function.
"""
raise NotImplementedError
def get_jax_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields JAX arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_torch_dataloader(self):
"""Get a Torch `DataLoader` for the `DataAdapter`.
Returns:
A Torch `DataLoader`.
"""
raise NotImplementedError
@property
def num_batches(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of data input the number of batches is known, e.g.
        for NumPy data it is the same as (number_of_elements / batch_size).
        Whereas for a dataset or Python generator, the size is unknown since it
        may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is
unknown. The caller could use this to control the loop of training,
show progress bar, or handle unexpected StopIteration error.
"""
raise NotImplementedError
@property
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of data input, such as NumPy arrays, the batch size
        is known and even required. Whereas for a dataset, the batch size is
        unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
@property
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@property
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
|
class DataAdapter(object):
"""Base class for input data adapters.
    The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields NumPy
arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_tf_dataset(self):
"""Get a `tf.data.Dataset` instance for the DataAdapter.
Note that the dataset returned does not repeat for epoch, so caller
might need to create new iterator for the same dataset at the beginning
of the epoch. This behavior might change in the future.
Returns:
A `tf.data.Dataset`. Caller might use the dataset in different
context, e.g. iter(dataset) in eager to get the value directly, or
in graph mode, provide the iterator tensor to Keras model function.
"""
raise NotImplementedError
def get_jax_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields JAX arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_torch_dataloader(self):
"""Get a Torch `DataLoader` for the `DataAdapter`.
Returns:
A Torch `DataLoader`.
"""
raise NotImplementedError
@property
def num_batches(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of data input the number of batches is known, e.g.
        for NumPy data it is the same as (number_of_elements / batch_size).
        Whereas for a dataset or Python generator, the size is unknown since it
        may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is
unknown. The caller could use this to control the loop of training,
show progress bar, or handle unexpected StopIteration error.
"""
raise NotImplementedError
@property
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of data input, such as NumPy arrays, the batch size
        is known and even required. Whereas for a dataset, the batch size is
        unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
@property
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@property
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
|
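The docstrings above define an abstract iteration contract; the following is a hypothetical in-memory adapter that satisfies it, included only to make that contract concrete. It assumes the DataAdapter base class defined above is in scope and is not part of Keras itself.
import math
import numpy as np
class ArrayDataAdapter(DataAdapter):
    """Toy adapter over a pair of in-memory NumPy arrays."""
    def __init__(self, x, y, batch_size=32):
        self._x, self._y, self._batch_size = x, y, batch_size
    def get_numpy_iterator(self):
        # Yield (x, y) slices as plain NumPy arrays, one batch at a time.
        for i in range(self.num_batches):
            s = slice(i * self._batch_size, (i + 1) * self._batch_size)
            yield self._x[s], self._y[s]
    @property
    def num_batches(self):
        return math.ceil(len(self._x) / self._batch_size)
    @property
    def batch_size(self):
        return self._batch_size
    @property
    def has_partial_batch(self):
        return len(self._x) % self._batch_size != 0
    @property
    def partial_batch_size(self):
        remainder = len(self._x) % self._batch_size
        return remainder or None
adapter = ArrayDataAdapter(np.zeros((10, 3)), np.zeros(10), batch_size=4)
assert adapter.num_batches == 3 and adapter.partial_batch_size == 2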