input (string, 33-5k chars) | output (string, 32-5k chars)
---|---|
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses ground truth class values for each sample.
- Unsupervised evaluation does not use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bicluster import consensus_score
from ._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
# TODO(1.10): Remove
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from ._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
__all__ = [
"adjusted_mutual_info_score",
"adjusted_rand_score",
"calinski_harabasz_score",
"completeness_score",
"consensus_score",
"contingency_matrix",
"davies_bouldin_score",
# TODO(1.10): Remove
"entropy",
"expected_mutual_information",
"fowlkes_mallows_score",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"mutual_info_score",
"normalized_mutual_info_score",
"pair_confusion_matrix",
"rand_score",
"silhouette_samples",
"silhouette_score",
"v_measure_score",
]
|
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses ground truth class values for each sample.
- Unsupervised evaluation does not use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bicluster import consensus_score
from ._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from ._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
__all__ = [
"adjusted_mutual_info_score",
"adjusted_rand_score",
"calinski_harabasz_score",
"completeness_score",
"consensus_score",
"contingency_matrix",
"davies_bouldin_score",
"entropy",
"expected_mutual_information",
"fowlkes_mallows_score",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"mutual_info_score",
"normalized_mutual_info_score",
"pair_confusion_matrix",
"rand_score",
"silhouette_samples",
"silhouette_score",
"v_measure_score",
]
|
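The docstring above distinguishes supervised metrics, which compare predictions against ground truth classes, from unsupervised metrics, which only look at the data and the predicted labels. A minimal sketch of that distinction, assuming scikit-learn is installed; the KMeans setup and blob data below are illustrative only.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import adjusted_rand_score, silhouette_score
# Toy data with known classes, clustered without using those classes.
X, y_true = make_blobs(n_samples=200, centers=3, random_state=0)
y_pred = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
# Supervised: compares predicted labels against the ground truth classes.
print(adjusted_rand_score(y_true, y_pred))
# Unsupervised: judges cluster quality from the data and labels alone.
print(silhouette_score(X, y_pred))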
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine.structures import LabelData
class TestLabelData(TestCase):
def test_label_to_onehot(self):
item = torch.tensor([1], dtype=torch.int64)
num_classes = 10
onehot = LabelData.label_to_onehot(label=item, num_classes=num_classes)
assert tuple(onehot.shape) == (num_classes, )
assert onehot.device == item.device
# item is not onehot
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(label='item', num_classes=num_classes)
# item's max is bigger than num_classes
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(
torch.tensor([11], dtype=torch.int64), num_classes)
onehot = LabelData.label_to_onehot(
label=torch.tensor([], dtype=torch.int64), num_classes=num_classes)
assert (onehot == torch.zeros((num_classes, ),
dtype=torch.int64)).all()
def test_onehot_to_label(self):
# item is not onehot
with self.assertRaisesRegex(
ValueError,
'input is not one-hot and can not convert to label'):
LabelData.onehot_to_label(
onehot=torch.tensor([2], dtype=torch.int64))
with self.assertRaises(AssertionError):
LabelData.onehot_to_label(onehot='item')
item = torch.arange(0, 9)
onehot = LabelData.label_to_onehot(item, num_classes=10)
label = LabelData.onehot_to_label(onehot)
assert (label == item).all()
assert label.device == item.device
item = torch.tensor([2])
onehot = LabelData.label_to_onehot(item, num_classes=10)
label = LabelData.onehot_to_label(onehot)
assert label == item
assert label.device == item.device
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='GPU is required!')
def test_cuda(self):
item = torch.arange(0, 9).cuda()
onehot = LabelData.label_to_onehot(item, num_classes=10)
assert item.device == onehot.device
label = LabelData.onehot_to_label(onehot)
assert label.device == onehot.device
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine.structures import LabelData
class TestLabelData(TestCase):
def test_label_to_onehot(self):
item = torch.tensor([1], dtype=torch.int64)
num_classes = 10
onehot = LabelData.label_to_onehot(label=item, num_classes=num_classes)
assert tuple(onehot.shape) == (num_classes, )
assert onehot.device == item.device
# item is not onehot
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(label='item', num_classes=num_classes)
# item's max is bigger than num_classes
with self.assertRaises(AssertionError):
LabelData.label_to_onehot(
torch.tensor([11], dtype=torch.int64), num_classes)
onehot = LabelData.label_to_onehot(
label=torch.tensor([], dtype=torch.int64), num_classes=num_classes)
assert (onehot == torch.zeros((num_classes, ),
dtype=torch.int64)).all()
def test_onehot_to_label(self):
# item is not onehot
with self.assertRaisesRegex(
ValueError,
'input is not one-hot and can not convert to label'):
LabelData.onehot_to_label(
onehot=torch.tensor([2], dtype=torch.int64))
with self.assertRaises(AssertionError):
LabelData.onehot_to_label(onehot='item')
item = torch.arange(0, 9)
onehot = LabelData.label_to_onehot(item, num_classes=10)
label = LabelData.onehot_to_label(onehot)
assert (label == item).all()
assert label.device == item.device
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='GPU is required!')
def test_cuda(self):
item = torch.arange(0, 9).cuda()
onehot = LabelData.label_to_onehot(item, num_classes=10)
assert item.device == onehot.device
label = LabelData.onehot_to_label(onehot)
assert label.device == onehot.device
|
"""Chat Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
ChatMessage.model_rebuild()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
if isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
|
"""Chat Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
ChatMessage.model_rebuild()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
if isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
|
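A brief usage sketch of the chunk concatenation defined above, assuming langchain_core is installed and ChatMessageChunk is exported from langchain_core.messages; the role and content values are made up.
from langchain_core.messages import ChatMessageChunk
# Chunks with the same role merge their content and metadata.
merged = ChatMessageChunk(role="assistant", content="Hello, ") + ChatMessageChunk(
    role="assistant", content="world!"
)
print(merged.content)  # "Hello, world!"
# Mixing roles is rejected, as enforced in __add__ above.
try:
    ChatMessageChunk(role="assistant", content="hi") + ChatMessageChunk(role="user", content="there")
except ValueError as err:
    print(err)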
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*AUDIO_FILES,
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*AUDIO_FILES,
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
from uuid import UUID
import pytest
from blockbuster import BlockBuster, blockbuster_ctx
from pytest_mock import MockerFixture
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[BlockBuster]:
with blockbuster_ctx("langchain_core") as bb:
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/_api/internal.py", "is_caller_internal")
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py", "aconfig_with_context"
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py", "_default_retry_config"
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py", "_get_cached_module_attributes"
)
yield bb
def pytest_addoption(parser: pytest.Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(
config: pytest.Config, items: Sequence[pytest.Function]
) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
elif only_extended:
item.add_marker(pytest.mark.skip(reason="Skipping not an extended test."))
@pytest.fixture
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
side_effect = (
UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
)
return mocker.patch("uuid.uuid4", side_effect=side_effect)
|
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
from uuid import UUID
import pytest
from blockbuster import BlockBuster, blockbuster_ctx
from pytest_mock import MockerFixture
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[BlockBuster]:
with blockbuster_ctx("langchain_core") as bb:
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/_api/internal.py", "is_caller_internal")
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py", "aconfig_with_context"
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py", "_default_retry_config"
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py", "_get_cached_module_attributes"
)
yield bb
def pytest_addoption(parser: pytest.Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(
config: pytest.Config, items: Sequence[pytest.Function]
) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
@pytest.fixture
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
side_effect = (
UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
)
return mocker.patch("uuid.uuid4", side_effect=side_effect)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import regularizers
from keras.src import testing
class LayerNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_ln_basics(self):
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_regularizer": regularizers.L2(0.01),
"beta_regularizer": regularizers.L2(0.01),
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=2,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_initializer": "ones",
"beta_initializer": "ones",
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"scale": False, "center": False},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"rms_scaling": True},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"axis": (-3, -2, -1)},
input_shape=(2, 8, 8, 3),
expected_output_shape=(2, 8, 8, 3),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={},
input_shape=(1, 0, 10),
expected_output_shape=(1, 0, 10),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
("Expected an int or a list/tuple of ints for the argument 'axis'"),
):
layers.LayerNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.LayerNormalization(dtype="float32")
layer.build(input_shape=(2, 2, 2))
inputs = np.random.normal(
loc=5.0, scale=10.0, size=(1000, 2, 2, 2)
).astype("float32")
out = layer(inputs)
out -= layer.beta
out /= layer.gamma
self.assertAllClose(ops.mean(out), 0.0, atol=1e-1)
self.assertAllClose(ops.std(out), 1.0, atol=1e-1)
def test_output(self):
layer = layers.LayerNormalization(
dtype="float32",
beta_initializer="ones",
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[-0.41386, 0.29307, 1.0, 1.70693, 2.41386]])
def test_output_with_rms_scaling(self):
layer = layers.LayerNormalization(
dtype="float32",
rms_scaling=True,
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[0.0, 0.70693, 1.41386, 2.12079, 2.82772]])
def test_large_value_within_autocast_scope(self):
layer = layers.LayerNormalization()
layer.build((1, 4, 4, 3))
# Use 70000 to trigger overflow for float16
large_value = ops.full(layer.gamma.shape, 70000)
with backend.AutocastScope("float16"):
layer.gamma.assign(large_value)
self.assertAllClose(layer.gamma.value, large_value)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import regularizers
from keras.src import testing
class LayerNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_ln_basics(self):
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_regularizer": regularizers.L2(0.01),
"beta_regularizer": regularizers.L2(0.01),
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=2,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_initializer": "ones",
"beta_initializer": "ones",
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"scale": False, "center": False},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"rms_scaling": True},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"axis": (-3, -2, -1)},
input_shape=(2, 8, 8, 3),
expected_output_shape=(2, 8, 8, 3),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={},
input_shape=(1, 0, 10),
expected_output_shape=(1, 0, 10),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Expected an int or a list/tuple of ints for the argument "
"'axis'"
),
):
layers.LayerNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.LayerNormalization(dtype="float32")
layer.build(input_shape=(2, 2, 2))
inputs = np.random.normal(
loc=5.0, scale=10.0, size=(1000, 2, 2, 2)
).astype("float32")
out = layer(inputs)
out -= layer.beta
out /= layer.gamma
self.assertAllClose(ops.mean(out), 0.0, atol=1e-1)
self.assertAllClose(ops.std(out), 1.0, atol=1e-1)
def test_output(self):
layer = layers.LayerNormalization(
dtype="float32",
beta_initializer="ones",
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[-0.41386, 0.29307, 1.0, 1.70693, 2.41386]])
def test_output_with_rms_scaling(self):
layer = layers.LayerNormalization(
dtype="float32",
rms_scaling=True,
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[0.0, 0.70693, 1.41386, 2.12079, 2.82772]])
def test_large_value_within_autocast_scope(self):
layer = layers.LayerNormalization()
layer.build((1, 4, 4, 3))
# Use 70000 to trigger overflow for float16
large_value = ops.full(layer.gamma.shape, 70000)
with backend.AutocastScope("float16"):
layer.gamma.assign(large_value)
self.assertAllClose(layer.gamma.value, large_value)
|
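For reference, a small NumPy sketch of the normalization that test_correctness and test_output above exercise; the epsilon of 1e-3 is used here because it reproduces the expected values in test_output, and gamma/beta are plain scalars rather than Keras variables.
import numpy as np
def layer_norm(x, gamma=1.0, beta=0.0, eps=1e-3):
    # Normalize over the last axis, then apply the scale and offset.
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return gamma * (x - mean) / np.sqrt(var + eps) + beta
x = np.arange(5, dtype="float32")[None, :]
# With gamma=1 and beta=1 (the "ones" initializers in test_output above):
print(layer_norm(x, gamma=1.0, beta=1.0))
# [[-0.41386  0.29307  1.       1.70693  2.41386]]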
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def set_epoch(self, epoch):
self.bbox_head.epoch = epoch
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def set_epoch(self, epoch):
self.bbox_head.epoch = epoch
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library imports for ClusterResolvers.
This library contains all implementations of ClusterResolvers.
ClusterResolvers are a way of specifying cluster information for distributed
execution. Built on top of existing `ClusterSpec` framework, ClusterResolvers
are a way for TensorFlow to communicate with various cluster management
systems (e.g. GCE, AWS, etc...).
"""
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver
from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver
from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import ExecutableLocation
from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library imports for ClusterResolvers.
This library contains all implementations of ClusterResolvers.
ClusterResolvers are a way of specifying cluster information for distributed
execution. Built on top of existing `ClusterSpec` framework, ClusterResolvers
are a way for TensorFlow to communicate with various cluster management
systems (e.g. GCE, AWS, etc...).
"""
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import UnionClusterResolver
from tensorflow.python.distribute.cluster_resolver.gce_cluster_resolver import GCEClusterResolver
from tensorflow.python.distribute.cluster_resolver.kubernetes_cluster_resolver import KubernetesClusterResolver
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver
|
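A minimal sketch of the ClusterResolver idea described in the docstring above, assuming TensorFlow is installed; the two-worker topology and host names are hypothetical.
import tensorflow as tf
# A ClusterSpec describes the cluster; a resolver wraps it so distribution
# strategies can query the topology.
cluster_spec = tf.train.ClusterSpec({"worker": ["worker0:2222", "worker1:2222"]})
resolver = tf.distribute.cluster_resolver.SimpleClusterResolver(
    cluster_spec, task_type="worker", task_id=0
)
print(resolver.cluster_spec().as_dict())
print(resolver.task_type, resolver.task_id)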
# TODO: Remove this config after benchmarking all related configs
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=4)
|
# TODO: Remove this config after benchmarking all related configs
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
data = dict(samples_per_gpu=4, workers_per_gpu=4)
|
_base_ = 'ssj_270k_coco_instance.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
load_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=image_size),
]
train_pipeline = [
dict(type='CopyPaste', max_num_pasted=100),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=load_pipeline),
pipeline=train_pipeline))
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
load_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=image_size,
ratio_range=(0.8, 1.25),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size=image_size),
]
train_pipeline = [
dict(type='CopyPaste', max_num_pasted=100),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=load_pipeline),
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=6000, metric=['bbox', 'segm'])
# optimizer assumes batch_size = (32 GPUs) x (2 samples per GPU)
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004)
optimizer_config = dict(grad_clip=None)
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.001,
step=[243000, 256500, 263250])
checkpoint_config = dict(interval=6000)
# The model is trained for 270k iterations with batch_size 64,
# which is roughly equivalent to 144 epochs.
runner = dict(type='IterBasedRunner', max_iters=270000)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
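The comments above tie the learning rate to a reference batch size of 64 (32 GPUs x 2 samples per GPU). A hedged sketch of the linear scaling rule this implies; the helper name and the numbers below are illustrative, not mmdet internals.
def scale_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
    # Linear scaling rule: lr grows in proportion to the total batch size.
    actual_batch_size = num_gpus * samples_per_gpu
    return base_lr * actual_batch_size / base_batch_size
print(scale_lr(0.1, 64, 32, 2))  # 0.1, the reference setup above
print(scale_lr(0.1, 64, 8, 2))   # 0.025, e.g. when training on 8 GPUs instead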
_base_ = [
'mmpretrain::_base_/datasets/imagenet_bs256_rsb_a12.py',
'mmpretrain::_base_/schedules/imagenet_bs2048_rsb.py',
'mmpretrain::_base_/default_runtime.py'
]
model = dict(
type='ImageClassifier',
backbone=dict(
type='mmdet.CSPNeXt',
arch='P5',
out_indices=(4, ),
expand_ratio=0.5,
deepen_factor=0.33,
widen_factor=0.5,
channel_attention=True,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='mmdet.SiLU')),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
mode='original',
loss_weight=1.0),
topk=(1, 5)),
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.2),
dict(type='CutMix', alpha=1.0)
]))
# dataset settings
train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))
# schedule settings
optim_wrapper = dict(
optimizer=dict(weight_decay=0.01),
paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
)
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=5,
# update by iter
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=595,
eta_min=1.0e-6,
by_epoch=True,
begin=5,
end=600)
]
train_cfg = dict(by_epoch=True, max_epochs=600)
|
_base_ = [
'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py',
'mmcls::_base_/schedules/imagenet_bs2048_rsb.py',
'mmcls::_base_/default_runtime.py'
]
model = dict(
type='ImageClassifier',
backbone=dict(
type='mmdet.CSPNeXt',
arch='P5',
out_indices=(4, ),
expand_ratio=0.5,
deepen_factor=0.33,
widen_factor=0.5,
channel_attention=True,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='mmdet.SiLU')),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
mode='original',
loss_weight=1.0),
topk=(1, 5)),
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.2),
dict(type='CutMix', alpha=1.0)
]))
# dataset settings
train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))
# schedule settings
optim_wrapper = dict(
optimizer=dict(weight_decay=0.01),
paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
)
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=5,
# update by iter
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=595,
eta_min=1.0e-6,
by_epoch=True,
begin=5,
end=600)
]
train_cfg = dict(by_epoch=True, max_epochs=600)
|
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: list[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return self.merge_documents(query, run_manager)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return await self.amerge_documents(query, run_manager)
def merge_documents(
self,
query: str,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for _retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self,
query: str,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
),
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for _retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
|
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: list[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return self.merge_documents(query, run_manager)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return await self.amerge_documents(query, run_manager)
def merge_documents(
self,
query: str,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self,
query: str,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
),
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
|
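A small usage sketch of the round-robin merging implemented above, with two hypothetical in-memory retrievers; it assumes the class is importable as langchain.retrievers.MergerRetriever.
from langchain.retrievers import MergerRetriever
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class StaticRetriever(BaseRetriever):
    # Hypothetical helper that always returns the same documents.
    docs: list[Document]
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        return self.docs
a = StaticRetriever(docs=[Document(page_content="a1"), Document(page_content="a2")])
b = StaticRetriever(docs=[Document(page_content="b1")])
merger = MergerRetriever(retrievers=[a, b])
print([d.page_content for d in merger.invoke("query")])  # ['a1', 'b1', 'a2']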
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
SpinnerColumn,
Text,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
)
class _QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{task.speed:.0f} QPS'
else:
_text = 'unknown'
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
def _get_pbar(disable: bool, total: Optional[int] = None):
columns = (
SpinnerColumn(),
TextColumn('[bold]{task.description}'),
BarColumn(),
MofNCompleteColumn(),
'•',
_QPSColumn('{task.speed} QPS', justify='right', style='progress.data.speed'),
'•',
TimeRemainingColumn() if total else TimeElapsedColumn(),
'•',
TextColumn(
'[bold blue]{task.fields[total_size]}',
justify='right',
style='progress.filesize',
),
)
return Progress(
*columns,
transient=False,
disable=disable,
)
def _get_progressbar(description: str, disable: bool, total: Optional[int]):
progress = _get_pbar(disable, total)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
|
from typing import Optional
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
SpinnerColumn,
Text,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
)
class _QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{task.speed:.0f} QPS'
else:
_text = 'unknown'
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
def _get_pbar(disable: bool, total: Optional[int] = None):
columns = (
SpinnerColumn(),
TextColumn('[bold]{task.description}'),
BarColumn(),
MofNCompleteColumn(),
'•',
_QPSColumn('{task.speed} QPS', justify='right', style='progress.data.speed'),
'•',
TimeRemainingColumn() if total else TimeElapsedColumn(),
'•',
TextColumn(
'[bold blue]{task.fields[total_size]}',
justify='right',
style='progress.filesize',
),
)
return Progress(
*columns,
transient=False,
disable=disable,
)
def _get_progressbar(description: str, disable: bool, total: Optional[int]):
progress = _get_pbar(disable, total)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
|
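A hedged sketch of driving the progress bar built by _get_progressbar above; the task description, loop, and total_size strings are made up for illustration.
import time
progress, task = _get_progressbar('indexing', disable=False, total=100)
with progress:  # starts and stops rendering
    progress.start_task(task)  # the task was added with start=False
    for i in range(100):
        time.sleep(0.01)
        # `total_size` feeds the TextColumn field defined above.
        progress.update(task, advance=1, total_size=f'{(i + 1) * 2} KB')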
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url):
mesh = Mesh3D(url=file_url)
mesh.tensors = mesh.url.load()
assert isinstance(mesh.tensors.vertices, np.ndarray)
assert isinstance(mesh.tensors.faces, np.ndarray)
def test_str_init():
t = parse_obj_as(Mesh3D, 'http://hello.ply')
assert t.url == 'http://hello.ply'
def test_doc():
class MyDoc(BaseDoc):
mesh1: Mesh3D
mesh2: Mesh3D
doc = MyDoc(mesh1='http://hello.ply', mesh2=Mesh3D(url='http://hello.ply'))
assert doc.mesh1.url == 'http://hello.ply'
assert doc.mesh2.url == 'http://hello.ply'
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url):
mesh = Mesh3D(url=file_url)
mesh.tensors = mesh.url.load()
assert isinstance(mesh.tensors.vertices, np.ndarray)
assert isinstance(mesh.tensors.faces, np.ndarray)
def test_str_init():
t = parse_obj_as(Mesh3D, 'http://hello.ply')
assert t.url == 'http://hello.ply'
def test_doc():
class MyDoc(BaseDoc):
mesh1: Mesh3D
mesh2: Mesh3D
doc = MyDoc(mesh1='http://hello.ply', mesh2=Mesh3D(url='http://hello.ply'))
assert doc.mesh1.url == 'http://hello.ply'
assert doc.mesh2.url == 'http://hello.ply'
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
import logging
from typing import Any, Callable, List
from llama_index.core.node_parser.interface import TextSplitter
logger = logging.getLogger(__name__)
def truncate_text(text: str, text_splitter: TextSplitter) -> str:
"""Truncate text to fit within the chunk size.
Args:
text (str): The text to truncate.
text_splitter (TextSplitter): The splitter to use for chunking.
Returns:
str: The first chunk of the split text that fits within the chunk size.
"""
chunks = text_splitter.split_text(text)
return chunks[0]
def split_text_keep_separator(text: str, separator: str) -> List[str]:
"""Split text with separator and keep the separator at the end of each split.
Args:
text (str): The text to split.
separator (str): The separator to split on.
Returns:
List[str]: List of text segments with separators preserved at the end of each split.
"""
parts = text.split(separator)
result = [separator + s if i > 0 else s for i, s in enumerate(parts)]
return [s for s in result if s]
def split_by_sep(sep: str, keep_sep: bool = True) -> Callable[[str], List[str]]:
"""Create a function that splits text by a separator.
Args:
sep (str): The separator to split on.
keep_sep (bool, optional): Whether to keep the separator in the output. Defaults to True.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of split strings.
"""
if keep_sep:
return lambda text: split_text_keep_separator(text, sep)
else:
return lambda text: text.split(sep)
def split_by_char() -> Callable[[str], List[str]]:
"""Create a function that splits text into individual characters.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of individual characters.
"""
return lambda text: list(text)
def split_by_sentence_tokenizer_internal(text: str, tokenizer: Any) -> List[str]:
"""Get the spans and then return the sentences.
Using the start index of each span
Instead of using end, use the start of the next span if available
"""
spans = list(tokenizer.span_tokenize(text))
sentences = []
for i, span in enumerate(spans):
start = span[0]
if i < len(spans) - 1:
end = spans[i + 1][0]
else:
end = len(text)
sentences.append(text[start:end])
return sentences
def split_by_sentence_tokenizer() -> Callable[[str], List[str]]:
import nltk
tokenizer = nltk.tokenize.PunktSentenceTokenizer()
return lambda text: split_by_sentence_tokenizer_internal(text, tokenizer)
def split_by_regex(regex: str) -> Callable[[str], List[str]]:
"""Create a function that splits text using a regular expression pattern.
Args:
regex (str): The regular expression pattern to use for splitting.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of matches based on the regex pattern.
"""
import re
return lambda text: re.findall(regex, text)
def split_by_phrase_regex() -> Callable[[str], List[str]]:
"""Split text by phrase regex.
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
period, or semicolon. The regular expression will also capture the
delimiters themselves as separate items in the list of phrases.
"""
regex = "[^,.;。]+[,.;。]?"
return split_by_regex(regex)
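# --- Usage sketch (illustrative only; the sample string below is hypothetical) ---
# Demonstrates composing the helpers above: a literal-separator split that keeps
# the separator attached, and the phrase-level regex split.
if __name__ == "__main__":
    sample = "Alpha, beta; gamma. Delta"
    print(split_by_sep(", ")(sample))       # ['Alpha', ', beta; gamma. Delta']
    print(split_by_phrase_regex()(sample))  # ['Alpha,', ' beta;', ' gamma.', ' Delta']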
|
import logging
from typing import Any, Callable, List
from llama_index.core.node_parser.interface import TextSplitter
logger = logging.getLogger(__name__)
def truncate_text(text: str, text_splitter: TextSplitter) -> str:
"""Truncate text to fit within the chunk size."""
chunks = text_splitter.split_text(text)
return chunks[0]
def split_text_keep_separator(text: str, separator: str) -> List[str]:
"""Split text with separator and keep the separator at the end of each split."""
parts = text.split(separator)
result = [separator + s if i > 0 else s for i, s in enumerate(parts)]
return [s for s in result if s]
def split_by_sep(sep: str, keep_sep: bool = True) -> Callable[[str], List[str]]:
"""Split text by separator."""
if keep_sep:
return lambda text: split_text_keep_separator(text, sep)
else:
return lambda text: text.split(sep)
def split_by_char() -> Callable[[str], List[str]]:
"""Split text by character."""
return lambda text: list(text)
def split_by_sentence_tokenizer_internal(text: str, tokenizer: Any) -> List[str]:
"""Get the spans and then return the sentences.
Using the start index of each span
Instead of using end, use the start of the next span if available
"""
spans = list(tokenizer.span_tokenize(text))
sentences = []
for i, span in enumerate(spans):
start = span[0]
if i < len(spans) - 1:
end = spans[i + 1][0]
else:
end = len(text)
sentences.append(text[start:end])
return sentences
def split_by_sentence_tokenizer() -> Callable[[str], List[str]]:
import nltk
tokenizer = nltk.tokenize.PunktSentenceTokenizer()
return lambda text: split_by_sentence_tokenizer_internal(text, tokenizer)
def split_by_regex(regex: str) -> Callable[[str], List[str]]:
"""Split text by regex."""
import re
return lambda text: re.findall(regex, text)
def split_by_phrase_regex() -> Callable[[str], List[str]]:
"""Split text by phrase regex.
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
period, or semicolon. The regular expression will also capture the
delimiters themselves as separate items in the list of phrases.
"""
regex = "[^,.;。]+[,.;。]?"
return split_by_regex(regex)
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
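# --- Usage sketch (illustrative; "path/to/data" is a placeholder root) ---
# The prototype Dataset base class is an IterDataPipe, so iterating the dataset
# yields the dicts built in `_prepare_sample`, each with "image" and "label".
if __name__ == "__main__":
    dataset = SVHN("path/to/data", split="test")
    sample = next(iter(dataset))
    print(sample["image"].shape, sample["label"])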
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from torchvision.prototype.features import Image, Label
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.baiducloud_bos_file import (
BaiduBOSFileLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaiduBOSFileLoader": "langchain_community.document_loaders.baiducloud_bos_file",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaiduBOSFileLoader",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.baiducloud_bos_file import (
BaiduBOSFileLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaiduBOSFileLoader": "langchain_community.document_loaders.baiducloud_bos_file"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaiduBOSFileLoader",
]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import ConfigType, OptMultiConfig, SampleList
from mmdet.registry import MODELS
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
        seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
            which equals ``1 / output_strides``, where ``output_strides``
            is the output stride of ``seg_preds``. Defaults to 1 / 4.
init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
config.
loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
head.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import ConfigType, OptMultiConfig, SampleList
from mmdet.registry import MODELS
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
        seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
            which equals ``1 / output_strides``, where ``output_strides``
            is the output stride of ``seg_preds``. Defaults to 1 / 4.
init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
config.
loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
head.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
|
"""
A quantized model executes some or all of the operations with integers rather than floating point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get models that are about 40% smaller and faster. The speed-up depends on your CPU and on how PyTorch was built, and can be anywhere between 10% and 300%.
Note: Quantized models are only available for CPUs. If a GPU is available, use the regular (non-quantized) model on the GPU for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import logging
import os
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
import gzip
import csv
import time
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
"""
A quantized model executes some or all of the operations with integers rather than floating point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get models that are about 40% smaller and faster. The speed-up depends on your CPU and on how PyTorch was built, and can be anywhere between 10% and 300%.
Note: Quantized models are only available for CPUs. If a GPU is available, use the regular (non-quantized) model on the GPU for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import logging
import os
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
import gzip
import csv
import time
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39
@pytest.fixture(
name="client",
params=[
"tutorial005",
pytest.param("tutorial005_py39", marks=needs_py39),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.extra_models.{request.param}")
client = TestClient(mod.app)
return client
def test_get_items(client: TestClient):
response = client.get("/keyword-weights/")
assert response.status_code == 200, response.text
assert response.json() == {"foo": 2.3, "bar": 3.4}
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/keyword-weights/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Read Keyword Weights Keyword Weights Get",
"type": "object",
"additionalProperties": {"type": "number"},
}
}
},
}
},
"summary": "Read Keyword Weights",
"operationId": "read_keyword_weights_keyword_weights__get",
}
}
},
}
|
from fastapi.testclient import TestClient
from docs_src.extra_models.tutorial005 import app
client = TestClient(app)
def test_get_items():
response = client.get("/keyword-weights/")
assert response.status_code == 200, response.text
assert response.json() == {"foo": 2.3, "bar": 3.4}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/keyword-weights/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Read Keyword Weights Keyword Weights Get",
"type": "object",
"additionalProperties": {"type": "number"},
}
}
},
}
},
"summary": "Read Keyword Weights",
"operationId": "read_keyword_weights_keyword_weights__get",
}
}
},
}
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
:param model: SentenceTransformer model
:param loss_fct: Which pytorch loss function should be used to compare the ``cosine_similarity(u, v)`` with the input_label?
By default, MSE is used: ``||input_label - cosine_sim(u, v)||_2``
:param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity.
            By default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)
]
train_batch_size = 1
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.view(-1))
|
import numpy as np
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize('dtype', ['int64', 'float64', 'int8', 'double'])
def test_dtype(dtype):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == dtype
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|
import numpy as np
import pytest
try:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
except (ImportError, TypeError):
pass
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize('dtype', ['int64', 'float64', 'int8', 'double'])
def test_dtype(dtype):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == dtype
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|
from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class CreateDraftMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,
description="The message body to include in the draft.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class O365CreateDraftMessage(O365BaseTool):
"""Tool for creating a draft email in Office 365."""
name: str = "create_email_draft"
description: str = (
"Use this tool to create a draft email with the provided message fields."
)
args_schema: Type[CreateDraftMessageSchema] = CreateDraftMessageSchema
def _run(
self,
body: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get mailbox object
mailbox = self.account.mailbox()
message = mailbox.new_message()
# Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.save_draft()
output = "Draft created: " + str(message)
return output
|
from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class CreateDraftMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,
description="The message body to include in the draft.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class O365CreateDraftMessage(O365BaseTool): # type: ignore[override, override]
"""Tool for creating a draft email in Office 365."""
name: str = "create_email_draft"
description: str = (
"Use this tool to create a draft email with the provided message fields."
)
args_schema: Type[CreateDraftMessageSchema] = CreateDraftMessageSchema
def _run(
self,
body: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get mailbox object
mailbox = self.account.mailbox()
message = mailbox.new_message()
# Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.save_draft()
output = "Draft created: " + str(message)
return output
|
# mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
    A fake store is a fake key-value store used simply for the initialization
    of a fake process group; one can use either FakeStore or HashStore.
"""
def _create_fake_pg(prefix_store, rank, world_size, timeout):
"""
A fake process group (not related to FakeTensor) is a process group which
    doesn't actually do any communication; it just hallucinates some
    communication. You can run a single rank with a fake process group
    without needing multiple processes (it simulates per-rank behavior).
    NOTE: This is not a real process group, and it will produce wrong results
    for every collective. It should be used as a convenient tool when
    experimenting with distributed code without caring about the actual data.
"""
return FakeProcessGroup(rank, world_size)
dist.Backend.register_backend("fake", _create_fake_pg, devices=["cpu", "cuda", "hpu"])
|
# mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
    A fake store is a fake key-value store used simply for the initialization
    of a fake process group; one can use either FakeStore or HashStore.
"""
def _create_fake_pg(prefix_store, rank, world_size, timeout):
"""
A fake process group (not related to FakeTensor) is a process group which
    doesn't actually do any communication; it just hallucinates some
    communication. You can run a single rank with a fake process group
    without needing multiple processes (it simulates per-rank behavior).
    NOTE: This is not a real process group, and it will produce wrong results
    for every collective. It should be used as a convenient tool when
    experimenting with distributed code without caring about the actual data.
"""
return FakeProcessGroup(rank, world_size)
dist.Backend.register_backend("fake", _create_fake_pg, devices=["cpu", "cuda"])
|
"""This is now a no-op and can be safely removed from your code.
It used to enable the use of
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor` when they were still
:term:`experimental`, but these estimators are now stable and can be imported
normally from `sklearn.ensemble`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Don't remove this file, we don't want to break users code just because the
# feature isn't experimental anymore.
import warnings
warnings.warn(
"Since version 1.0, "
"it is not needed to import enable_hist_gradient_boosting anymore. "
"HistGradientBoostingClassifier and HistGradientBoostingRegressor are now "
"stable and can be normally imported from sklearn.ensemble."
)
|
"""This is now a no-op and can be safely removed from your code.
It used to enable the use of
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor` when they were still
:term:`experimental`, but these estimators are now stable and can be imported
normally from `sklearn.ensemble`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Don't remove this file, we don't want to break users code just because the
# feature isn't experimental anymore.
import warnings
warnings.warn(
"Since version 1.0, "
"it is not needed to import enable_hist_gradient_boosting anymore. "
"HistGradientBoostingClassifier and HistGradientBoostingRegressor are now "
"stable and can be normally imported from sklearn.ensemble."
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import RedditSearchRun, RedditSearchSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedditSearchSchema": "langchain_community.tools",
"RedditSearchRun": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedditSearchRun",
"RedditSearchSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import RedditSearchRun, RedditSearchSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedditSearchSchema": "langchain_community.tools",
"RedditSearchRun": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedditSearchSchema",
"RedditSearchRun",
]
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
Given a list of (anchor, positive) pairs, this loss sums the following two losses:
1. Forward loss: Given an anchor, find the sample with the highest similarity out of all positives in the batch.
This is equivalent to :class:`MultipleNegativesRankingLoss`.
2. Backward loss: Given a positive, find the sample with the highest similarity out of all anchors in the batch.
For example with question-answer pairs, :class:`MultipleNegativesRankingLoss` just computes the loss to find
the answer given a question, but :class:`MultipleNegativesSymmetricRankingLoss` additionally computes the
loss to find the question given an answer.
        Note: If you pass triplets, the negative entry will be ignored; only the positive is used for each anchor.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
- :class:`CachedMultipleNegativesSymmetricRankingLoss` is equivalent to this loss, but it uses caching that
allows for much higher batch sizes (and thus better performance) without extra memory usage. However, it
is slightly slower.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
Given a list of (anchor, positive) pairs, this loss sums the following two losses:
1. Forward loss: Given an anchor, find the sample with the highest similarity out of all positives in the batch.
This is equivalent to :class:`MultipleNegativesRankingLoss`.
2. Backward loss: Given a positive, find the sample with the highest similarity out of all anchors in the batch.
For example with question-answer pairs, :class:`MultipleNegativesRankingLoss` just computes the loss to find
the answer given a question, but :class:`MultipleNegativesSymmetricRankingLoss` additionally computes the
loss to find the question given an answer.
Note: If you pass triplets, the negative entry will be ignored; an anchor is only matched against the positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
- :class:`CachedMultipleNegativesSymmetricRankingLoss` is equivalent to this loss, but it uses caching that
allows for much higher batch sizes (and thus better performance) without extra memory usage. However, it
is slightly slower.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
import enum
from typing import Any, Optional
import pydantic
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
from backend.data.graph import Graph
class WSMethod(enum.Enum):
SUBSCRIBE_GRAPH_EXEC = "subscribe_graph_execution"
SUBSCRIBE_GRAPH_EXECS = "subscribe_graph_executions"
UNSUBSCRIBE = "unsubscribe"
GRAPH_EXECUTION_EVENT = "graph_execution_event"
NODE_EXECUTION_EVENT = "node_execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WSMessage(pydantic.BaseModel):
method: WSMethod
data: Optional[dict[str, Any] | list[Any] | str] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class WSSubscribeGraphExecutionRequest(pydantic.BaseModel):
graph_exec_id: str
class WSSubscribeGraphExecutionsRequest(pydantic.BaseModel):
graph_id: str
class ExecuteGraphResponse(pydantic.BaseModel):
graph_exec_id: str
class CreateGraph(pydantic.BaseModel):
graph: Graph
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: list[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: list[APIKeyPermission]
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[2]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class RequestTopUp(pydantic.BaseModel):
credit_amount: int
|
import enum
from typing import Any, Optional
import pydantic
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
from backend.data.graph import Graph
class WSMethod(enum.Enum):
SUBSCRIBE_GRAPH_EXEC = "subscribe_graph_execution"
UNSUBSCRIBE = "unsubscribe"
GRAPH_EXECUTION_EVENT = "graph_execution_event"
NODE_EXECUTION_EVENT = "node_execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WSMessage(pydantic.BaseModel):
method: WSMethod
data: Optional[dict[str, Any] | list[Any] | str] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class WSSubscribeGraphExecutionRequest(pydantic.BaseModel):
graph_exec_id: str
class ExecuteGraphResponse(pydantic.BaseModel):
graph_exec_id: str
class CreateGraph(pydantic.BaseModel):
graph: Graph
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: list[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: list[APIKeyPermission]
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[2]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class RequestTopUp(pydantic.BaseModel):
credit_amount: int
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
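# Illustrative sketch: a self-contained round trip of the Arrow IPC stream format that
# _generate_tables() relies on. One record batch is written with pa.ipc.new_stream and then
# re-read with pa.ipc.open_stream, exactly as the builder does. The file name "example.arrow"
# and the toy columns are hypothetical and used only for illustration.
if __name__ == "__main__":
    import pyarrow as pa

    batch = pa.RecordBatch.from_pydict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
    with pa.OSFile("example.arrow", "wb") as sink:
        with pa.ipc.new_stream(sink, batch.schema) as writer:
            writer.write_batch(batch)
    with open("example.arrow", "rb") as f:
        for record_batch in pa.ipc.open_stream(f):
            table = pa.Table.from_batches([record_batch])
            print(table.schema, table.num_rows)  # schema read back from the stream, 3 rows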
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
|
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
__all__ = ["comma_list", "stringify_dict", "stringify_value"]
|
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
__all__ = ["stringify_value", "stringify_dict", "comma_list"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.core.utils import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(PointRend, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
from __future__ import annotations
import csv
import logging
import os
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
it computes the Pearson and Spearman correlation between the predicted score for the sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: list[list[str]], scores: list[float], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info(f"Correlation:\tPearson: {eval_pearson:.4f}\tSpearman: {eval_spearman:.4f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
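# Illustrative sketch: a minimal example of the two metrics reported above, using hypothetical
# gold and predicted scores. Pearson measures linear correlation, while Spearman only compares
# rankings, so a monotone but non-linear prediction still reaches a Spearman correlation of 1.0.
if __name__ == "__main__":
    gold = [0.1, 0.4, 0.5, 0.9]
    pred = [g**2 for g in gold]      # same ranking as gold, but not a linear relationship
    print(pearsonr(gold, pred)[0])   # high, yet below 1.0
    print(spearmanr(gold, pred)[0])  # exactly 1.0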
|
from __future__ import annotations
import csv
import logging
import os
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
it computes the Pearson and Spearman correlation between the predicted score for the sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: list[list[str]], scores: list[float], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info("Correlation:\tPearson: {:.4f}\tSpearman: {:.4f}".format(eval_pearson, eval_spearman))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
|
import importlib
import pytest
from fastapi.testclient import TestClient
from pytest import MonkeyPatch
from ...utils import needs_pydanticv1, needs_pydanticv2
@pytest.fixture(
name="app",
params=[
pytest.param("tutorial001", marks=needs_pydanticv2),
pytest.param("tutorial001_pv1", marks=needs_pydanticv1),
],
)
def get_app(request: pytest.FixtureRequest, monkeypatch: MonkeyPatch):
monkeypatch.setenv("ADMIN_EMAIL", "admin@example.com")
mod = importlib.import_module(f"docs_src.settings.{request.param}")
return mod.app
def test_settings(app):
client = TestClient(app)
response = client.get("/info")
assert response.status_code == 200, response.text
assert response.json() == {
"app_name": "Awesome API",
"admin_email": "admin@example.com",
"items_per_user": 50,
}
|
from fastapi.testclient import TestClient
from pytest import MonkeyPatch
from ...utils import needs_pydanticv2
@needs_pydanticv2
def test_settings(monkeypatch: MonkeyPatch):
monkeypatch.setenv("ADMIN_EMAIL", "admin@example.com")
from docs_src.settings.tutorial001 import app
client = TestClient(app)
response = client.get("/info")
assert response.status_code == 200, response.text
assert response.json() == {
"app_name": "Awesome API",
"admin_email": "admin@example.com",
"items_per_user": 50,
}
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.typing import NdArray
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_device():
array = np.array([1, 2, 3])
assert NumpyCompBackend.device(array) is None
@pytest.mark.parametrize('dtype', [np.int64, np.float64, int, float])
def test_dtype(dtype):
array = np.array([1, 2, 3], dtype=dtype)
assert NumpyCompBackend.dtype(array) == dtype
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
def test_stack():
t0 = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
t1 = parse_obj_as(NdArray, np.ones((3, 224, 224)))
stacked1 = NumpyCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, np.ndarray)
assert stacked1.shape == (2, 3, 224, 224)
stacked2 = NumpyCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, np.ndarray)
assert stacked2.shape == (3, 224, 224, 2)
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.typing import NdArray
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_device():
array = np.array([1, 2, 3])
assert NumpyCompBackend.device(array) is None
@pytest.mark.parametrize('dtype', [np.int64, np.float64, np.int, np.float])
def test_dtype(dtype):
array = np.array([1, 2, 3], dtype=dtype)
assert NumpyCompBackend.dtype(array) == dtype
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
def test_stack():
t0 = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
t1 = parse_obj_as(NdArray, np.ones((3, 224, 224)))
stacked1 = NumpyCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, np.ndarray)
assert stacked1.shape == (2, 3, 224, 224)
stacked2 = NumpyCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, np.ndarray)
assert stacked2.shape == (3, 224, 224, 2)
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.typing import AnyTensor, NdArray, TorchTensor
def test_set_tensor():
class MyDocument(BaseDocument):
tensor: AnyTensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import Document
from docarray.typing import AnyTensor, NdArray, TorchTensor
def test_set_tensor():
class MyDocument(Document):
tensor: AnyTensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class MRKLOutputParser(AgentOutputParser):
"""MRKL Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
"""
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match and includes_answer:
if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)):
# if final answer is before the hallucination, return final answer
start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION)
end_index = text.find("\n\n", start_index)
return AgentFinish(
{"output": text[start_index:end_index].strip()}, text[:end_index]
)
else:
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
if action_match:
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
# ensure that if it's a well-formed SQL query we don't remove any trailing " chars
if tool_input.startswith("SELECT ") is False:
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "mrkl"
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class MRKLOutputParser(AgentOutputParser):
"""MRKL Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
"""
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match and includes_answer:
if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)):
# if final answer is before the hallucination, return final answer
start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION)
end_index = text.find("\n\n", start_index)
return AgentFinish(
{"output": text[start_index:end_index].strip()}, text[:end_index]
)
else:
raise OutputParserException(
f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
)
if action_match:
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
# ensure that if it's a well-formed SQL query we don't remove any trailing " chars
if tool_input.startswith("SELECT ") is False:
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
@property
def _type(self) -> str:
return "mrkl"
|
import http.client
import json
from typing import Optional
def list_packages(*, contains: Optional[str] = None) -> list[str]:
conn = http.client.HTTPSConnection("api.github.com")
try:
headers = {
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": "2022-11-28",
"User-Agent": "langchain-cli",
}
conn.request(
"GET",
"/repos/langchain-ai/langchain/contents/templates",
headers=headers,
)
res = conn.getresponse()
res_str = res.read()
data = json.loads(res_str)
package_names = [
p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
]
return (
[p for p in package_names if contains in p] if contains else package_names
)
finally:
conn.close()
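# Illustrative sketch: hypothetical usage of the helper above. It performs a live GitHub API
# call, so it needs network access and is subject to unauthenticated rate limits.
if __name__ == "__main__":
    print(list_packages())                   # every template package name
    print(list_packages(contains="openai"))  # only names containing "openai"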
|
import http.client
import json
from typing import Optional
def list_packages(*, contains: Optional[str] = None):
conn = http.client.HTTPSConnection("api.github.com")
headers = {
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": "2022-11-28",
"User-Agent": "langchain-cli",
}
conn.request(
"GET",
"/repos/langchain-ai/langchain/contents/templates",
headers=headers,
)
res = conn.getresponse()
res_str = res.read()
data = json.loads(res_str)
package_names = [
p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
]
return [p for p in package_names if contains in p] if contains else package_names
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotContactBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
contact_data: dict = SchemaField(
description="Contact data for create/update operations",
default_factory=dict,
)
email: str = SchemaField(
description="Email address for get/update operations", default=""
)
class Output(BlockSchema):
contact: dict = SchemaField(description="Contact information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="5267326e-c4c1-4016-9f54-4e72ad02f813",
description="Manages HubSpot contacts - create, update, and retrieve contact information",
categories={BlockCategory.CRM},
input_schema=HubSpotContactBlock.Input,
output_schema=HubSpotContactBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = requests.post(
base_url, headers=headers, json={"properties": input_data.contact_data}
)
result = response.json()
yield "contact", result
yield "status", "created"
elif input_data.operation == "get":
# Search for contact by email
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
}
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "contact", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
},
)
contact_id = search_response.json().get("results", [{}])[0].get("id")
if contact_id:
response = requests.patch(
f"{base_url}/{contact_id}",
headers=headers,
json={"properties": input_data.contact_data},
)
result = response.json()
yield "contact", result
yield "status", "updated"
else:
yield "contact", {}
yield "status", "contact_not_found"
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotContactBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
contact_data: dict = SchemaField(
description="Contact data for create/update operations", default={}
)
email: str = SchemaField(
description="Email address for get/update operations", default=""
)
class Output(BlockSchema):
contact: dict = SchemaField(description="Contact information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="5267326e-c4c1-4016-9f54-4e72ad02f813",
description="Manages HubSpot contacts - create, update, and retrieve contact information",
categories={BlockCategory.CRM},
input_schema=HubSpotContactBlock.Input,
output_schema=HubSpotContactBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = requests.post(
base_url, headers=headers, json={"properties": input_data.contact_data}
)
result = response.json()
yield "contact", result
yield "status", "created"
elif input_data.operation == "get":
# Search for contact by email
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
}
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "contact", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
},
)
contact_id = search_response.json().get("results", [{}])[0].get("id")
if contact_id:
response = requests.patch(
f"{base_url}/{contact_id}",
headers=headers,
json={"properties": input_data.contact_data},
)
result = response.json()
yield "contact", result
yield "status", "updated"
else:
yield "contact", {}
yield "status", "contact_not_found"
|
from typing import Dict, Tuple, Optional, List
import numpy as np
from jina import Executor, DocumentArray, requests, Document
from jina.types.arrays.memmap import DocumentArrayMemmap
from jina_commons import get_logger
class SimpleIndexer(Executor):
"""
A simple indexer that stores all the Document data together,
in a DocumentArrayMemmap object
To be used as a unified indexer, combining both indexing and searching
"""
def __init__(
self,
index_file_name: str,
default_traversal_paths: Optional[List[str]] = None,
default_top_k: int = 5,
distance_metric: str = 'cosine',
**kwargs,
):
"""
Initializer function for the simple indexer
:param index_file_name: The file name for the index file
:param default_traversal_paths: The default traversal path that is used
if no traversal path is given in the parameters of the request.
This defaults to ['r'].
:param default_top_k: default value for the top_k parameter
:param distance_metric: The distance metric to be used for finding the
most similar embeddings. Either 'euclidean' or 'cosine'.
"""
super().__init__(**kwargs)
self._docs = DocumentArrayMemmap(self.workspace + f'/{index_file_name}')
self.default_traversal_paths = default_traversal_paths or ['r']
self.default_top_k = default_top_k
if distance_metric == 'cosine':
self.distance = _cosine
elif distance_metric == 'euclidean':
self.distance = _euclidean
else:
raise ValueError('This distance metric is not available!')
self._flush = True
self._docs_embeddings = None
self.logger = get_logger(self)
@requests(on='/index')
def index(
self,
docs: Optional['DocumentArray'] = None,
parameters: Optional[Dict] = {},
**kwargs,
):
"""All Documents to the DocumentArray
:param docs: the docs to add
:param parameters: the parameters dictionary
"""
if not docs:
return
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
flat_docs = docs.traverse_flat(traversal_paths)
self._docs.extend(flat_docs)
self._flush = True
@requests(on='/search')
def search(
self,
docs: Optional['DocumentArray'] = None,
parameters: Optional[Dict] = {},
**kwargs,
):
"""Perform a vector similarity search and retrieve the full Document match
:param docs: the Documents to search with
:param parameters: the parameters for the search"""
if not docs:
return
if not self._docs:
self.logger.warning(
'no documents are indexed. searching empty docs. returning.'
)
return
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
flat_docs = docs.traverse_flat(traversal_paths)
if not flat_docs:
return
top_k = parameters.get('top_k', self.default_top_k)
flat_docs.match(
self._docs,
metric=lambda q_emb, d_emb, _: self.distance(
_ext_A(_norm(q_emb)), _ext_B(_norm(d_emb))
),
limit=top_k,
)
self._flush = False
@requests(on='/fill_embedding')
def fill_embedding(self, docs: DocumentArray, **kwargs):
"""retrieve embedding of Documents by id
:param docs: DocumentArray to search with
"""
if not docs:
return
for doc in docs:
doc.embedding = self._docs[doc.id].embedding
def _ext_A(A):
nA, dim = A.shape
A_ext = np.ones((nA, dim * 3))
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A ** 2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = np.ones((dim * 3, nB))
B_ext[:dim] = (B ** 2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
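# Illustrative sketch: a small numerical check of the extended-matrix trick above. For query
# rows A and document rows B, _euclidean(_ext_A(A), _ext_B(B)) expands
# ||a||^2 - 2*a.b + ||b||^2 with a single dot product, so it should match the pairwise
# Euclidean distances computed directly via broadcasting. With L2-normalised inputs, _cosine
# then yields the cosine distance (1 - cosine similarity). Shapes below are arbitrary.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A = rng.random((4, 8))  # 4 query embeddings of dimension 8
    B = rng.random((6, 8))  # 6 indexed embeddings of dimension 8
    fast = _euclidean(_ext_A(A), _ext_B(B))
    direct = np.linalg.norm(A[:, None, :] - B[None, :, :], axis=-1)
    print(np.allclose(fast, direct))  # True
    cos_dist = _cosine(_ext_A(_norm(A)), _ext_B(_norm(B)))
    print(np.allclose(cos_dist, 1 - _norm(A) @ _norm(B).T))  # True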
|
from typing import Dict, Tuple, Optional, List
import numpy as np
from jina import Executor, DocumentArray, requests, Document
from jina.types.arrays.memmap import DocumentArrayMemmap
class SimpleIndexer(Executor):
"""
A simple indexer that stores all the Document data together,
in a DocumentArrayMemmap object
To be used as a unified indexer, combining both indexing and searching
"""
def __init__(
self,
index_file_name: str,
default_traversal_paths: Optional[List[str]] = None,
default_top_k: int = 5,
distance_metric: str = 'cosine',
**kwargs,
):
"""
Initializer function for the simple indexer
:param index_file_name: The file name for the index file
:param default_traversal_paths: The default traversal path that is used
if no traversal path is given in the parameters of the request.
This defaults to ['r'].
:param default_top_k: default value for the top_k parameter
:param distance_metric: The distance metric to be used for finding the
most similar embeddings. Either 'euclidean' or 'cosine'.
"""
super().__init__(**kwargs)
self._docs = DocumentArrayMemmap(self.workspace + f'/{index_file_name}')
self.default_traversal_paths = default_traversal_paths or ['r']
self.default_top_k = default_top_k
if distance_metric == 'cosine':
self.distance = _cosine
elif distance_metric == 'euclidean':
self.distance = _euclidean
else:
raise ValueError('This distance metric is not available!')
self._flush = True
self._docs_embeddings = None
@property
def index_embeddings(self):
if self._flush:
self._docs_embeddings = np.stack(self._docs.get_attributes('embedding'))
self._flush = False
return self._docs_embeddings
@requests(on='/index')
def index(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
"""All Documents to the DocumentArray
:param docs: the docs to add
:param parameters: the parameters dictionary
"""
if not docs: return
traversal_path = parameters.get('traversal_paths', self.default_traversal_paths)
flat_docs = docs.traverse_flat(traversal_path)
self._docs.extend(flat_docs)
self._flush = True
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
"""Perform a vector similarity search and retrieve the full Document match
:param docs: the Documents to search with
:param parameters: the parameters for the search"""
if not docs: return
traversal_path = parameters.get('traversal_paths', self.default_traversal_paths)
top_k = parameters.get('top_k', self.default_top_k)
flat_docs = docs.traverse_flat(traversal_path)
a = np.stack(flat_docs.get_attributes('embedding'))
b = self.index_embeddings
q_emb = _ext_A(_norm(a))
d_emb = _ext_B(_norm(b))
dists = self.distance(q_emb, d_emb)
idx, dist = self._get_sorted_top_k(dists, int(top_k))
for _q, _ids, _dists in zip(flat_docs, idx, dist):
for _id, _dist in zip(_ids, _dists):
d = Document(self._docs[int(_id)], copy=True)
d.scores['cosine'] = 1 - _dist
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
dist: 'np.array', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
@requests(on='/fill_embedding')
def fill_embedding(self, docs: DocumentArray, **kwargs):
"""retrieve embedding of Documents by id
:param docs: DocumentArray to search with
"""
if not docs: return
for doc in docs:
doc.embedding = self._docs[doc.id].embedding
def _ext_A(A):
nA, dim = A.shape
A_ext = np.ones((nA, dim * 3))
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A ** 2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = np.ones((dim * 3, nB))
B_ext[:dim] = (B ** 2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; the EXPORT must still be done manually before starting jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.17'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
parallel-executing plot generators versus the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256;
the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; the EXPORT must still be done manually before starting jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.16'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
parallel-executing plot generators versus the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256;
the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling2D", "keras.layers.MaxPool2D"])
class MaxPooling2D(BasePooling):
"""Max pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> max_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> max_pool_2d(x)
`strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> max_pool_2d(x)
"""
def __init__(
self,
pool_size=(2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
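# Illustrative sketch: a quick arithmetic check of the output-shape formulas quoted in the
# docstring, using hypothetical sizes. A 3x3 input with pool_size=2 and strides=1 pools down
# to 2x2 with "valid" padding and stays 3x3 with "same" padding.
if __name__ == "__main__":
    import math

    input_size, pool_size, strides = 3, 2, 1
    valid = math.floor((input_size - pool_size) / strides) + 1
    same = math.floor((input_size - 1) / strides) + 1
    print(valid, same)  # 2 3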
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling2D", "keras.layers.MaxPool2D"])
class MaxPooling2D(BasePooling):
"""Max pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> max_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> max_pool_2d(x)
    `strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> max_pool_2d(x)
"""
def __init__(
self,
pool_size=(2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
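# Hedged usage sketch, added for illustration (not part of the upstream file):
# it demonstrates the output-shape formulas documented above. Assumes NumPy and
# a working Keras backend are installed; the 5x5 input is an arbitrary example.
if __name__ == "__main__":
    import numpy as np

    x = np.random.rand(1, 5, 5, 1)  # (batch, height, width, channels)
    # "valid": floor((5 - 2) / 2) + 1 = 2 rows and columns.
    print(MaxPooling2D(pool_size=2, strides=2, padding="valid")(x).shape)
    # "same": floor((5 - 1) / 2) + 1 = 3 rows and columns.
    print(MaxPooling2D(pool_size=2, strides=2, padding="same")(x).shape)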
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Immutable mapping."""
import collections.abc
# WARNING: this class is used internally by extension types (tf.ExtensionType),
# and may be deleted if/when extension types transition to a different encoding
# in the future.
class ImmutableDict(collections.abc.Mapping):
"""Immutable `Mapping`."""
# Note: keys, items, values, get, __eq__, and __ne__ are implemented by
# the `Mapping` base class.
def __init__(self, *args, **kwargs):
self._dict = dict(*args, **kwargs)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return f'ImmutableDict({self._dict})'
# This suppresses a warning that tf.nest would otherwise generate.
__supported_by_tf_nest__ = True
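# Hedged usage sketch, added for illustration (not part of the upstream file):
# read access behaves like a regular dict, while item assignment fails because
# `collections.abc.Mapping` provides no `__setitem__`. Keys/values are arbitrary.
if __name__ == '__main__':
  d = ImmutableDict({'a': 1, 'b': 2})
  print(d['a'], len(d), 'a' in d)  # -> 1 2 True
  try:
    d['c'] = 3  # item assignment is not supported
  except TypeError as e:
    print('mutation rejected:', e)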
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Immutable mapping."""
import collections.abc
# WARNING: this class is used internally by extension types (tf.ExtensionType),
# and may be deleted if/when extension types transition to a different encoding
# in the future.
class ImmutableDict(collections.abc.Mapping):
"""Immutable `Mapping`."""
# Note: keys, items, values, get, __eq__, and __ne__ are implemented by
# the `Mapping` base class.
def __init__(self, *args, **kwargs):
self._dict = dict(*args, **kwargs)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return f'ImmutableDict({self._dict})'
  # This suppresses a warning that tf.nest would otherwise generate.
__supported_by_tf_nest__ = True
|
from typing import Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
get_paths,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocArray[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocArray[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
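# Note (added for clarity): the double-underscore access paths exercised below
# mirror nested attribute access, e.g. 'middle__inner__img' corresponds to
# `doc.middle.inner.img`, and 'da__img__url' reaches into the documents of `da`.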
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDoc):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes_',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
def test_get_paths():
paths = list(get_paths(patterns='*.py'))
for path in paths:
assert path.endswith('.py')
def test_get_paths_recursive():
paths_rec = list(get_paths(patterns='**', recursive=True))
paths_not_rec = list(get_paths(patterns='**', recursive=False))
assert len(paths_rec) > len(paths_not_rec)
def test_get_paths_exclude():
paths = list(get_paths(patterns='*.py'))
paths_wo_init = list(get_paths(patterns='*.py', exclude_regex='__init__.[a-z]*'))
assert len(paths_wo_init) <= len(paths)
assert '__init__.py' not in paths_wo_init
|
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
get_paths,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocumentArray[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes_',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
def test_get_paths():
paths = list(get_paths(patterns='*.py'))
for path in paths:
assert path.endswith('.py')
def test_get_paths_recursive():
paths_rec = list(get_paths(patterns='**', recursive=True))
paths_not_rec = list(get_paths(patterns='**', recursive=False))
assert len(paths_rec) > len(paths_not_rec)
def test_get_paths_exclude():
paths = list(get_paths(patterns='*.py'))
paths_wo_init = list(get_paths(patterns='*.py', exclude_regex='__init__.[a-z]*'))
assert len(paths_wo_init) <= len(paths)
assert '__init__.py' not in paths_wo_init
|
"""Generate SQL queries using LlamaIndex."""
import argparse
import json
import logging
import os
import re
from typing import Any, cast
from llama_index import LLMPredictor, SQLDatabase
from llama_index.indices import SQLStructStoreIndex
from llama_index.llms.openai import OpenAI
from sqlalchemy import create_engine, text
from tqdm import tqdm
logging.getLogger("root").setLevel(logging.WARNING)
_spaces = re.compile(r"\s+")
_newlines = re.compile(r"\n+")
def _generate_sql(
llama_index: SQLStructStoreIndex,
nl_query_text: str,
) -> str:
"""Generate SQL query for the given NL query text."""
query_engine = llama_index.as_query_engine()
response = query_engine.query(nl_query_text)
if (
response.metadata is None
or "sql_query" not in response.metadata
or response.metadata["sql_query"] is None
):
raise RuntimeError("No SQL query generated.")
query = response.metadata["sql_query"]
# Remove newlines and extra spaces.
query = _newlines.sub(" ", query)
query = _spaces.sub(" ", query)
return query.strip()
def generate_sql(llama_indexes: dict, examples: list, output_file: str) -> None:
"""Generate SQL queries for the given examples and write them to the output file."""
with open(output_file, "w") as f:
for example in tqdm(examples, desc=f"Generating {output_file}"):
db_name = example["db_id"]
nl_query_text = example["question"]
try:
sql_query = _generate_sql(llama_indexes[db_name], nl_query_text)
except Exception as e:
print(
f"Failed to generate SQL query for question: "
f"{example['question']} on database: {example['db_id']}."
)
print(e)
sql_query = "ERROR"
f.write(sql_query + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate SQL queries using LlamaIndex."
)
parser.add_argument(
"--input", type=str, required=True, help="Path to the spider dataset directory."
)
parser.add_argument(
"--output",
type=str,
required=True,
help="Path to the output directory of generated SQL files,"
" one query on each line, "
"to be compared with the *_gold.sql files in the input directory.",
)
parser.add_argument(
"--model",
type=str,
choices=["gpt-4", "gpt-3.5-turbo", "text-davinci-003", "code-davinci-002"],
required=True,
help="The model to use for generating SQL queries.",
)
args = parser.parse_args()
# Create the output directory if it does not exist.
if not os.path.exists(args.output):
os.makedirs(args.output)
# Load the Spider dataset from the input directory.
with open(os.path.join(args.input, "train_spider.json")) as f:
train_spider = json.load(f)
with open(os.path.join(args.input, "train_others.json")) as f:
train_others = json.load(f)
with open(os.path.join(args.input, "dev.json")) as f:
dev = json.load(f)
# Create all necessary SQL database objects.
databases = {}
for db in train_spider + train_others + dev:
db_name = db["db_id"]
if db_name in databases:
continue
db_path = os.path.join(args.input, "database", db_name, db_name + ".sqlite")
engine = create_engine("sqlite:///" + db_path)
databases[db_name] = (SQLDatabase(engine=engine), engine)
# Create the LlamaIndexes for all databases.
llm = OpenAI(model=args.model, temperature=0)
llm_predictor = LLMPredictor(llm=llm)
llm_indexes = {}
for db_name, (db, engine) in databases.items():
# Get the name of the first table in the database.
# This is a hack to get a table name for the index, which can use any
# table in the database.
with engine.connect() as connection:
table_name = cast(
Any,
connection.execute(
text("select name from sqlite_master where type = 'table'")
).fetchone(),
)[0]
llm_indexes[db_name] = SQLStructStoreIndex.from_documents(
documents=[],
llm_predictor=llm_predictor,
sql_database=db,
table_name=table_name,
)
# Generate SQL queries.
generate_sql(
llama_indexes=llm_indexes,
examples=train_spider + train_others,
output_file=os.path.join(args.output, "train_pred.sql"),
)
generate_sql(
llama_indexes=llm_indexes,
examples=dev,
output_file=os.path.join(args.output, "dev_pred.sql"),
)
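# Example invocation (hypothetical script name and paths, added for illustration):
#   python generate_sql.py --input ./spider --output ./predictions --model gpt-3.5-turbo
# The OpenAI-backed LLM additionally expects an OPENAI_API_KEY in the environment.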
|
"""Generate SQL queries using LlamaIndex."""
import argparse
import json
import logging
import os
import re
from typing import Any, cast
from llama_index import LLMPredictor, SQLDatabase
from llama_index.indices import SQLStructStoreIndex
from llama_index.llms.openai import OpenAI
from sqlalchemy import create_engine, text
from tqdm import tqdm
logging.getLogger("root").setLevel(logging.WARNING)
_spaces = re.compile(r"\s+")
_newlines = re.compile(r"\n+")
def _generate_sql(
llama_index: SQLStructStoreIndex,
nl_query_text: str,
) -> str:
"""Generate SQL query for the given NL query text."""
query_engine = llama_index.as_query_engine()
response = query_engine.query(nl_query_text)
if (
response.metadata is None
or "sql_query" not in response.metadata
or response.metadata["sql_query"] is None
):
raise RuntimeError("No SQL query generated.")
query = response.metadata["sql_query"]
# Remove newlines and extra spaces.
query = _newlines.sub(" ", query)
query = _spaces.sub(" ", query)
return query.strip()
def generate_sql(llama_indexes: dict, examples: list, output_file: str) -> None:
"""Generate SQL queries for the given examples and write them to the output file."""
with open(output_file, "w") as f:
for example in tqdm(examples, desc=f"Generating {output_file}"):
db_name = example["db_id"]
nl_query_text = example["question"]
try:
sql_query = _generate_sql(llama_indexes[db_name], nl_query_text)
except Exception as e:
print(
f"Failed to generate SQL query for question: "
f"{example['question']} on database: {example['db_id']}."
)
print(e)
sql_query = "ERROR"
f.write(sql_query + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate SQL queries using LlamaIndex."
)
parser.add_argument(
"--input", type=str, required=True, help="Path to the spider dataset directory."
)
parser.add_argument(
"--output",
type=str,
required=True,
help="Path to the output directory of generated SQL files,"
" one query on each line, "
"to be compared with the *_gold.sql files in the input directory.",
)
parser.add_argument(
"--model",
type=str,
choices=["gpt-4", "gpt-3.5-turbo", "text-davinci-003", "code-davinci-002"],
required=True,
help="The model to use for generating SQL queries.",
)
args = parser.parse_args()
# Create the output directory if it does not exist.
if not os.path.exists(args.output):
os.makedirs(args.output)
# Load the Spider dataset from the input directory.
with open(os.path.join(args.input, "train_spider.json")) as f:
train_spider = json.load(f)
with open(os.path.join(args.input, "train_others.json")) as f:
train_others = json.load(f)
with open(os.path.join(args.input, "dev.json")) as f:
dev = json.load(f)
# Create all necessary SQL database objects.
databases = {}
for db in train_spider + train_others + dev:
db_name = db["db_id"]
if db_name in databases:
continue
db_path = os.path.join(args.input, "database", db_name, db_name + ".sqlite")
engine = create_engine("sqlite:///" + db_path)
databases[db_name] = (SQLDatabase(engine=engine), engine)
# Create the LlamaIndexes for all databases.
llm = OpenAI(model=args.model, temperature=0)
llm_predictor = LLMPredictor(llm=llm)
llm_indexes = {}
for db_name, (db, engine) in databases.items():
# Get the name of the first table in the database.
# This is a hack to get a table name for the index, which can use any
# table in the database.
with engine.connect() as connection:
table_name = cast(
Any,
connection.execute(
text("select name from sqlite_master where type = 'table'")
).fetchone(),
)[0]
llm_indexes[db_name] = SQLStructStoreIndex.from_documents(
documents=[],
llm_predictor=llm_predictor,
sql_database=db,
table_name=table_name,
)
# Generate SQL queries.
generate_sql(
llama_indexes=llm_indexes,
examples=train_spider + train_others,
output_file=os.path.join(args.output, "train_pred.sql"),
)
generate_sql(
llama_indexes=llm_indexes,
examples=dev,
output_file=os.path.join(args.output, "dev_pred.sql"),
)
|
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
cleaned_req = req.split(" ")[0]
if cleaned_req.replace("-", "_") not in underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
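# Note (added for clarity): `metadata.requires` returns raw requirement strings,
# for example 'langsmith (<0.2.0,>=0.1.0) ; extra == "test"'; taking the part
# before the first space keeps, in the typical case, just the distribution name,
# so sub-dependencies can be compared against the top-level package list by name.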
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
    # Packages that do not start with the "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg, *list(all_packages)]
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
_underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
cleaned_req = req.split(" ")[0]
if cleaned_req.replace("-", "_") not in _underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
    # Packages that do not start with the "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg, *list(all_packages)]
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import DDODHead
class TestDDODHead(TestCase):
def test_ddod_head_loss(self):
"""Tests ddod head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False))
atss_head = DDODHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
use_dcn=False,
norm_cfg=None,
train_cfg=cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_iou'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_centerness_loss.item(), 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_iou'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_centerness_loss.item(), 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import DDODHead
def test_ddod_head_loss():
"""Tests ddod head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict( # ATSSAssigner
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = DDODHead(
num_classes=4,
in_channels=1,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
train_cfg=train_cfg,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, iou_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_iou_loss = sum(empty_gt_losses['loss_iou'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_iou_loss = sum(one_gt_losses['loss_iou'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
    Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
    Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
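# Hedged numeric check, added for illustration (not part of the upstream file):
# the formula in `call` -- x * rsqrt(max(sum(x**2), eps)) -- yields rows with an
# L2 norm of 1, matching the docstring example. Uses NumPy only.
if __name__ == "__main__":
    import numpy as np

    data = np.arange(6, dtype="float32").reshape(2, 3)
    square_sum = np.sum(np.square(data), axis=-1, keepdims=True)
    normalized = data / np.sqrt(np.maximum(square_sum, 1e-12))
    print(np.sum(normalized**2, axis=-1))  # approximately [1. 1.]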
|
__version__ = '0.14.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from typing import Optional
import pytest
from docarray import BaseDocument
from docarray.documents import Image
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[Image]
class Middle(BaseDocument):
img: Optional[Image]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[Image]
middle: Optional[Middle]
doc = Outer(img=Image(), middle=Middle(img=Image(), inner=Inner(img=Image())))
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: Image
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
from typing import Optional
import pytest
from docarray import BaseDocument
from docarray.documents import Image
from docarray.helper import (
_access_path_to_dict,
_dict_to_access_paths,
_update_nested_dicts,
is_access_path_valid,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[Image]
class Middle(BaseDocument):
img: Optional[Image]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[Image]
middle: Optional[Middle]
doc = Outer(img=Image(), middle=Middle(img=Image(), inner=Inner(img=Image())))
return doc
def test_is_access_path_valid(nested_doc):
assert is_access_path_valid(nested_doc.__class__, 'img')
assert is_access_path_valid(nested_doc.__class__, 'middle__img')
assert is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert is_access_path_valid(nested_doc.__class__, 'middle')
def test_is_access_path_not_valid(nested_doc):
assert not is_access_path_valid(nested_doc.__class__, 'inner')
assert not is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: Image
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
]
|
from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import RNNT, emformer_rnnt_base, emformer_rnnt_model
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
Wav2Vec2Model,
HuBERTPretrainModel,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
hubert_pretrain_model,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_xlarge,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
]
|
"""**Graphs** provide a natural language interface to graph databases."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.graphs import (
ArangoGraph,
FalkorDBGraph,
HugeGraph,
KuzuGraph,
MemgraphGraph,
NebulaGraph,
Neo4jGraph,
NeptuneGraph,
NetworkxEntityGraph,
RdfGraph,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MemgraphGraph": "langchain_community.graphs",
"NetworkxEntityGraph": "langchain_community.graphs",
"Neo4jGraph": "langchain_community.graphs",
"NebulaGraph": "langchain_community.graphs",
"NeptuneGraph": "langchain_community.graphs",
"KuzuGraph": "langchain_community.graphs",
"HugeGraph": "langchain_community.graphs",
"RdfGraph": "langchain_community.graphs",
"ArangoGraph": "langchain_community.graphs",
"FalkorDBGraph": "langchain_community.graphs",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ArangoGraph",
"FalkorDBGraph",
"HugeGraph",
"KuzuGraph",
"MemgraphGraph",
"NebulaGraph",
"Neo4jGraph",
"NeptuneGraph",
"NetworkxEntityGraph",
"RdfGraph",
]
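# Usage note (added for illustration): with the module-level `__getattr__` above
# (PEP 562), an import such as `from langchain.graphs import Neo4jGraph` resolves
# lazily through `_import_attribute("Neo4jGraph")`, which emits a deprecation
# warning and forwards the lookup to `langchain_community.graphs`, provided the
# `langchain-community` package is installed.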
|
"""**Graphs** provide a natural language interface to graph databases."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.graphs import (
ArangoGraph,
FalkorDBGraph,
HugeGraph,
KuzuGraph,
MemgraphGraph,
NebulaGraph,
Neo4jGraph,
NeptuneGraph,
NetworkxEntityGraph,
RdfGraph,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MemgraphGraph": "langchain_community.graphs",
"NetworkxEntityGraph": "langchain_community.graphs",
"Neo4jGraph": "langchain_community.graphs",
"NebulaGraph": "langchain_community.graphs",
"NeptuneGraph": "langchain_community.graphs",
"KuzuGraph": "langchain_community.graphs",
"HugeGraph": "langchain_community.graphs",
"RdfGraph": "langchain_community.graphs",
"ArangoGraph": "langchain_community.graphs",
"FalkorDBGraph": "langchain_community.graphs",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MemgraphGraph",
"NetworkxEntityGraph",
"Neo4jGraph",
"NebulaGraph",
"NeptuneGraph",
"KuzuGraph",
"HugeGraph",
"RdfGraph",
"ArangoGraph",
"FalkorDBGraph",
]
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
# Configuration
LIBRARY_NAME = "diffusers"
GITHUB_REPO = "huggingface/diffusers"
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def check_pypi_for_latest_release(library_name):
"""Check PyPI for the latest release of the library."""
response = requests.get(f"https://pypi.org/pypi/{library_name}/json", timeout=60)
if response.status_code == 200:
data = response.json()
return data["info"]["version"]
else:
print("Failed to fetch library details from PyPI.")
return None
def get_github_release_info(github_repo):
"""Fetch the latest release info from GitHub."""
url = f"https://api.github.com/repos/{github_repo}/releases/latest"
response = requests.get(url, timeout=60)
if response.status_code == 200:
data = response.json()
return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]}
else:
print("Failed to fetch release info from GitHub.")
return None
def notify_slack(webhook_url, library_name, version, release_info):
"""Send a notification to a Slack channel."""
message = (
f"🚀 New release for {library_name} available: version **{version}** 🎉\n"
f"📜 Release Notes: {release_info['url']}\n"
f"⏱️ Release time: {release_info['release_time']}"
)
payload = {"text": message}
response = requests.post(webhook_url, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
def main():
latest_version = check_pypi_for_latest_release(LIBRARY_NAME)
release_info = get_github_release_info(GITHUB_REPO)
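    # GitHub release tags look like "v0.31.0"; dropping the "v" makes the tag
    # comparable to the version string reported by PyPI.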
parsed_version = release_info["tag_name"].replace("v", "")
if latest_version and release_info and latest_version == parsed_version:
notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info)
else:
print(f"{latest_version=}, {release_info=}, {parsed_version=}")
raise ValueError("There were some problems.")
if __name__ == "__main__":
main()
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from ..src.diffusers.utils.constants import DIFFUSERS_REQUEST_TIMEOUT
# Configuration
LIBRARY_NAME = "diffusers"
GITHUB_REPO = "huggingface/diffusers"
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def check_pypi_for_latest_release(library_name):
"""Check PyPI for the latest release of the library."""
response = requests.get(f"https://pypi.org/pypi/{library_name}/json", timeout=DIFFUSERS_REQUEST_TIMEOUT)
if response.status_code == 200:
data = response.json()
return data["info"]["version"]
else:
print("Failed to fetch library details from PyPI.")
return None
def get_github_release_info(github_repo):
"""Fetch the latest release info from GitHub."""
url = f"https://api.github.com/repos/{github_repo}/releases/latest"
response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT)
if response.status_code == 200:
data = response.json()
return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]}
else:
print("Failed to fetch release info from GitHub.")
return None
def notify_slack(webhook_url, library_name, version, release_info):
"""Send a notification to a Slack channel."""
message = (
f"🚀 New release for {library_name} available: version **{version}** 🎉\n"
f"📜 Release Notes: {release_info['url']}\n"
f"⏱️ Release time: {release_info['release_time']}"
)
payload = {"text": message}
response = requests.post(webhook_url, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
def main():
latest_version = check_pypi_for_latest_release(LIBRARY_NAME)
release_info = get_github_release_info(GITHUB_REPO)
parsed_version = release_info["tag_name"].replace("v", "")
if latest_version and release_info and latest_version == parsed_version:
notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info)
else:
print(f"{latest_version=}, {release_info=}, {parsed_version=}")
raise ValueError("There were some problems.")
if __name__ == "__main__":
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_transformer import (
ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer)
from .deformable_detr_transformer import (
DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder, DeformableDetrTransformerEncoderLayer)
from .detr_transformer import (DetrTransformerDecoder,
DetrTransformerDecoderLayer,
DetrTransformerEncoder,
DetrTransformerEncoderLayer)
from .dino_transformer import CdnQueryGenerator, DinoTransformerDecoder
from .utils import (MLP, AdaptivePadding, DynamicConv, PatchEmbed,
PatchMerging, inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator'
]
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.6.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
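# Re-assigning these classes onto the legacy module paths below keeps old imports
# such as `datasets.utils.DownloadManager` working after the module reorganization.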
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.6.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead', 'SOLOV2Head', 'DDODHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead'
]
|
from typing import Dict
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path):
pass
@staticmethod
def load(input_path):
return Normalize()
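# --- Illustrative usage sketch (not part of the original module) ---
# Shows what the Normalize layer does to raw sentence embeddings: every row of
# the "sentence_embedding" tensor is rescaled to unit L2 norm. The numbers
# below are made up purely for demonstration.
if __name__ == "__main__":
    import torch

    layer = Normalize()
    features = {"sentence_embedding": torch.tensor([[3.0, 4.0], [0.0, 2.0]])}
    normalized = layer(features)["sentence_embedding"]
    print(normalized)                   # tensor([[0.6000, 0.8000], [0.0000, 1.0000]])
    print(normalized.norm(p=2, dim=1))  # tensor([1., 1.])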
|
from torch import Tensor
from torch import nn
from typing import Dict
import torch.nn.functional as F
class Normalize(nn.Module):
"""
This layer normalizes embeddings to unit length
"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]):
features.update({'sentence_embedding': F.normalize(features['sentence_embedding'], p=2, dim=1)})
return features
def save(self, output_path):
pass
@staticmethod
def load(input_path):
return Normalize()
|
from operator import itemgetter
from typing import Sequence, Iterable
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _del_doc_by_id(self, _id: str):
self._sql(f'DELETE FROM {self._table_name} WHERE doc_id=?', (_id,))
self._commit()
def _set_doc_by_id(self, _id: str, value: 'Document'):
self._sql(
f'UPDATE {self._table_name} SET serialized_value=?, doc_id=? WHERE doc_id=?',
(value, value.id, _id),
)
self._commit()
def _get_doc_by_id(self, id: str) -> 'Document':
r = self._sql(
f'SELECT serialized_value FROM {self._table_name} WHERE doc_id = ?', (id,)
)
res = r.fetchone()
if res is None:
raise KeyError(f'Can not find Document with id=`{id}`')
return res[0]
# essentials end here
# now start the optimized bulk methods
def _get_docs_by_offsets(self, offsets: Sequence[int]) -> Iterable['Document']:
ids = [self._offset2ids.get_id(offset) for offset in offsets]
return self._get_docs_by_ids(ids)
def _clear_storage(self):
self._sql(f'DELETE FROM {self._table_name}')
self._commit()
    def _del_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
self._sql(
f"DELETE FROM {self._table_name} WHERE doc_id in ({','.join(['?'] * len(ids))})",
ids,
)
self._commit()
def _load_offset2ids(self):
r = self._sql(
f"SELECT doc_id FROM {self._table_name} ORDER BY item_order",
)
self._offset2ids = Offset2ID(list(map(itemgetter(0), r)))
def _save_offset2ids(self):
for offset, doc_id in enumerate(self._offset2ids):
self._sql(
f"""
UPDATE {self._table_name} SET item_order = ? WHERE {self._table_name}.doc_id = ?
""",
(offset, doc_id),
)
self._commit()
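# --- Illustrative sketch (not part of the original mixin) ---
# The mixin above delegates to `self._sql`, `self._commit` and `self._table_name`,
# which the surrounding sqlite storage backend provides, so it cannot run on its
# own. The standalone snippet below only demonstrates the same parameterized
# UPDATE pattern used by `_save_offset2ids`, with the stdlib sqlite3 module and
# an invented table layout.
if __name__ == "__main__":
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE docs (doc_id TEXT PRIMARY KEY, item_order INTEGER)")
    conn.executemany("INSERT INTO docs (doc_id) VALUES (?)", [("a",), ("b",), ("c",)])
    # persist the offset -> id ordering, mirroring _save_offset2ids
    for offset, doc_id in enumerate(["c", "a", "b"]):
        conn.execute("UPDATE docs SET item_order = ? WHERE doc_id = ?", (offset, doc_id))
    conn.commit()
    print(conn.execute("SELECT doc_id FROM docs ORDER BY item_order").fetchall())
    # [('c',), ('a',), ('b',)]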
|
from operator import itemgetter
from typing import Sequence, Iterable
from ..base.getsetdel import BaseGetSetDelMixin
from ..base.helper import Offset2ID
from .... import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _del_doc_by_id(self, _id: str):
self._sql(f'DELETE FROM {self._table_name} WHERE doc_id=?', (_id,))
self._commit()
def _set_doc_by_id(self, _id: str, value: 'Document'):
self._sql(
f'UPDATE {self._table_name} SET serialized_value=?, doc_id=? WHERE doc_id=?',
(value, value.id, _id),
)
self._commit()
def _get_doc_by_id(self, id: str) -> 'Document':
r = self._sql(
f'SELECT serialized_value FROM {self._table_name} WHERE doc_id = ?', (id,)
)
res = r.fetchone()
if res is None:
raise KeyError(f'Can not find Document with id=`{id}`')
return res[0]
# essentials end here
# now start the optimized bulk methods
def _get_docs_by_offsets(self, offsets: Sequence[int]) -> Iterable['Document']:
ids = [self._offset2ids.get_id(offset) for offset in offsets]
return self._get_docs_by_ids(ids)
def _clear_storage(self):
self._sql(f'DELETE FROM {self._table_name}')
self._commit()
    def _del_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
self._sql(
f"DELETE FROM {self._table_name} WHERE doc_id in ({','.join(['?'] * len(ids))})",
ids,
)
self._commit()
def _load_offset2ids(self):
r = self._sql(
f"SELECT doc_id FROM {self._table_name} ORDER BY item_order",
)
self._offset2ids = Offset2ID(list(map(itemgetter(0), r)))
def _save_offset2ids(self):
for offset, doc_id in enumerate(self._offset2ids):
self._sql(
f"""
UPDATE {self._table_name} SET item_order = ? WHERE {self._table_name}.doc_id = ?
""",
(offset, doc_id),
)
self._commit()
|
_base_ = './mask-rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask_rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis import Redis
from redis.lock import Lock as RedisLock
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
in case the key is not unlocked for a specified duration, to prevent memory leaks.
"""
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = Lock()
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
if lock.locked():
lock.release()
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
lock.acquire()
return lock
def release(self, key: Any):
if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
lock.release()
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
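# --- Illustrative usage sketch (not part of the original module) ---
# Requires a reachable Redis server; the host/port below are placeholders.
# The `locked()` context manager serializes access to a given key across all
# processes that share the same Redis instance.
if __name__ == "__main__":
    from redis import Redis

    mutex = RedisKeyedMutex(Redis(host="localhost", port=6379), timeout=60)
    with mutex.locked("user:42"):
        ...  # critical section: only one holder of "user:42" at a time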
|
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis import Redis
from redis.lock import Lock as RedisLock
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
in case the key is not unlocked for a specified duration, to prevent memory leaks.
"""
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = Lock()
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
lock.release()
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
lock.acquire()
return lock
def release(self, key: Any):
if lock := self.locks.get(key):
lock.release()
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import VideoLlavaVideoProcessor
class VideoLlavaVideoProcessingTester:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, images):
return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
class VideoLlavaVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = VideoLlavaVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = VideoLlavaVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_properties(self):
video_processing = self.fast_video_processing_class(**self.video_processor_dict)
self.assertTrue(hasattr(video_processing, "do_resize"))
self.assertTrue(hasattr(video_processing, "size"))
self.assertTrue(hasattr(video_processing, "do_center_crop"))
self.assertTrue(hasattr(video_processing, "center_crop"))
self.assertTrue(hasattr(video_processing, "do_normalize"))
self.assertTrue(hasattr(video_processing, "image_mean"))
self.assertTrue(hasattr(video_processing, "image_std"))
self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
pass
if is_vision_available():
if is_torchvision_available():
from transformers import VideoLlavaVideoProcessor
class VideoLlavaVideoProcessingTester:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, images):
return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
class VideoLlavaVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = VideoLlavaVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = VideoLlavaVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_properties(self):
video_processing = self.fast_video_processing_class(**self.video_processor_dict)
self.assertTrue(hasattr(video_processing, "do_resize"))
self.assertTrue(hasattr(video_processing, "size"))
self.assertTrue(hasattr(video_processing, "do_center_crop"))
self.assertTrue(hasattr(video_processing, "center_crop"))
self.assertTrue(hasattr(video_processing, "do_normalize"))
self.assertTrue(hasattr(video_processing, "image_mean"))
self.assertTrue(hasattr(video_processing, "image_std"))
self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
:param model: SentenceTransformerModel
:param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric
            contains common distance metrics that can be used.
:param triplet_margin: The negative should be at least this much further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),
InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2']),
]
train_batch_size = 1
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
|
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
msg = (
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
raise ValueError(msg)
if len(chain.input_keys) != 1:
msg = (
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
raise ValueError(msg)
if len(chain.output_keys) != 1:
msg = (
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
                )
                raise ValueError(msg)
if names is not None and len(names) != len(chains):
msg = "Length of chains does not match length of names."
raise ValueError(msg)
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls,
llms: list[BaseLLM],
prompt: Optional[PromptTemplate] = None,
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
        If a prompt was provided when the laboratory was initialized, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
name = self.names[i] if self.names is not None else str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
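# --- Illustrative usage sketch (not part of the original module) ---
# Uses FakeListLLM from langchain_community as a stand-in for real models so the
# comparison runs without API keys; that import is an assumption of this sketch,
# not something this module depends on.
if __name__ == "__main__":
    from langchain_community.llms import FakeListLLM

    llm_a = FakeListLLM(responses=["Paris"])
    llm_b = FakeListLLM(responses=["The capital of France is Paris."])
    lab = ModelLaboratory.from_llms([llm_a, llm_b])
    lab.compare("What is the capital of France?")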
|
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
msg = (
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
raise ValueError(msg)
if len(chain.input_keys) != 1:
msg = (
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
raise ValueError(msg)
if len(chain.output_keys) != 1:
msg = (
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
                )
                raise ValueError(msg)
if names is not None and len(names) != len(chains):
msg = "Length of chains does not match length of names."
raise ValueError(msg)
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
        If a prompt was provided when the laboratory was initialized, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
name = self.names[i] if self.names is not None else str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
import numpy as np
import torch
from six.moves import map, zip
from ..mask.structures import BitmapMasks, PolygonMasks
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments.
Note:
        This function applies ``func`` to multiple inputs and
        maps the multiple outputs of ``func`` into different
        lists. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
        tuple(list): A tuple containing multiple lists, each of which contains \
            one kind of the results returned by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of size
count)"""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret
def mask2ndarray(mask):
"""Convert Mask to ndarray..
Args:
mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
torch.Tensor or np.ndarray): The mask to be converted.
Returns:
np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
"""
if isinstance(mask, (BitmapMasks, PolygonMasks)):
mask = mask.to_ndarray()
elif isinstance(mask, torch.Tensor):
mask = mask.detach().cpu().numpy()
elif not isinstance(mask, np.ndarray):
raise TypeError(f'Unsupported {type(mask)} data type')
return mask
def flip_tensor(src_tensor, flip_direction):
"""flip tensor base on flip_direction.
Args:
src_tensor (Tensor): input feature map, shape (B, C, H, W).
flip_direction (str): The flipping direction. Options are
'horizontal', 'vertical', 'diagonal'.
Returns:
out_tensor (Tensor): Flipped tensor.
"""
assert src_tensor.ndim == 4
valid_directions = ['horizontal', 'vertical', 'diagonal']
assert flip_direction in valid_directions
if flip_direction == 'horizontal':
out_tensor = torch.flip(src_tensor, [3])
elif flip_direction == 'vertical':
out_tensor = torch.flip(src_tensor, [2])
else:
out_tensor = torch.flip(src_tensor, [2, 3])
return out_tensor
def center_of_mass(mask, esp=1e-6):
"""Calculate the centroid coordinates of the mask.
Args:
mask (Tensor): The mask to be calculated, shape (h, w).
esp (float): Avoid dividing by zero. Default: 1e-6.
Returns:
tuple[Tensor]: the coordinates of the center point of the mask.
- center_h (Tensor): the center point of the height.
- center_w (Tensor): the center point of the width.
"""
h, w = mask.shape
grid_h = torch.arange(h, device=mask.device)[:, None]
grid_w = torch.arange(w, device=mask.device)
normalizer = mask.sum().float().clamp(min=esp)
center_h = (mask * grid_h).sum() / normalizer
center_w = (mask * grid_w).sum() / normalizer
return center_h, center_w
def generate_coordinate(featmap_sizes, device='cuda'):
"""Generate the coordinate.
Args:
        featmap_sizes (tuple): The size of the feature map for which the
            coordinates are generated, of shape (N, C, H, W).
device (str): The device where the feature will be put on.
Returns:
        coord_feat (Tensor): The coordinate feature, of shape (N, 2, H, W).
"""
x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device)
y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device)
y, x = torch.meshgrid(y_range, x_range)
y = y.expand([featmap_sizes[0], 1, -1, -1])
x = x.expand([featmap_sizes[0], 1, -1, -1])
coord_feat = torch.cat([x, y], 1)
return coord_feat
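# --- Illustrative sketch (not part of the original module) ---
# Shows what `multi_apply` and `flip_tensor` return on dummy inputs. Because of
# the relative import above, this only runs when the module is imported as part
# of the mmdet package.
if __name__ == "__main__":
    def add_and_mul(x, y):
        return x + y, x * y

    sums, products = multi_apply(add_and_mul, [1, 2, 3], [10, 20, 30])
    print(sums)      # [11, 22, 33]
    print(products)  # [10, 40, 90]

    feat = torch.arange(4.0).reshape(1, 1, 2, 2)
    print(flip_tensor(feat, 'horizontal'))  # last (width) dimension reversed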
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
import numpy as np
import torch
from six.moves import map, zip
from ..mask.structures import BitmapMasks, PolygonMasks
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments.
Note:
        This function applies ``func`` to multiple inputs and
        maps the multiple outputs of ``func`` into different
        lists. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
        tuple(list): A tuple containing multiple lists, each of which contains \
            one kind of the results returned by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of size
count)"""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret
def mask2ndarray(mask):
"""Convert Mask to ndarray..
Args:
mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
torch.Tensor or np.ndarray): The mask to be converted.
Returns:
np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
"""
if isinstance(mask, (BitmapMasks, PolygonMasks)):
mask = mask.to_ndarray()
elif isinstance(mask, torch.Tensor):
mask = mask.detach().cpu().numpy()
elif not isinstance(mask, np.ndarray):
raise TypeError(f'Unsupported {type(mask)} data type')
return mask
def flip_tensor(src_tensor, flip_direction):
"""flip tensor base on flip_direction.
Args:
src_tensor (Tensor): input feature map, shape (B, C, H, W).
flip_direction (str): The flipping direction. Options are
'horizontal', 'vertical', 'diagonal'.
Returns:
out_tensor (Tensor): Flipped tensor.
"""
assert src_tensor.ndim == 4
valid_directions = ['horizontal', 'vertical', 'diagonal']
assert flip_direction in valid_directions
if flip_direction == 'horizontal':
out_tensor = torch.flip(src_tensor, [3])
elif flip_direction == 'vertical':
out_tensor = torch.flip(src_tensor, [2])
else:
out_tensor = torch.flip(src_tensor, [2, 3])
return out_tensor
|
# Copyright 2022 The Music Spectrogram Diffusion Authors.
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ....configuration_utils import ConfigMixin, register_to_config
from ....models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
@register_to_config
def __init__(
self,
max_length: int,
vocab_size: int,
d_model: int,
dropout_rate: float,
num_layers: int,
num_heads: int,
d_kv: int,
d_ff: int,
feed_forward_proj: str,
is_decoder: bool = False,
):
super().__init__()
self.token_embedder = nn.Embedding(vocab_size, d_model)
self.position_encoding = nn.Embedding(max_length, d_model)
self.position_encoding.weight.requires_grad = False
self.dropout_pre = nn.Dropout(p=dropout_rate)
t5config = T5Config(
vocab_size=vocab_size,
d_model=d_model,
num_heads=num_heads,
d_kv=d_kv,
d_ff=d_ff,
dropout_rate=dropout_rate,
feed_forward_proj=feed_forward_proj,
is_decoder=is_decoder,
is_encoder_decoder=False,
)
self.encoders = nn.ModuleList()
for lyr_num in range(num_layers):
lyr = T5Block(t5config)
self.encoders.append(lyr)
self.layer_norm = T5LayerNorm(d_model)
self.dropout_post = nn.Dropout(p=dropout_rate)
def forward(self, encoder_input_tokens, encoder_inputs_mask):
x = self.token_embedder(encoder_input_tokens)
seq_length = encoder_input_tokens.shape[1]
inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
x += self.position_encoding(inputs_positions)
x = self.dropout_pre(x)
# inverted the attention mask
input_shape = encoder_input_tokens.size()
extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
for lyr in self.encoders:
x = lyr(x, extended_attention_mask)[0]
x = self.layer_norm(x)
return self.dropout_post(x), encoder_inputs_mask
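# --- Illustrative sketch (not part of the original module) ---
# Instantiates the notes encoder with tiny, made-up hyper-parameters and runs a
# single forward pass; only the tensor shapes are being demonstrated. Because of
# the relative imports above, this requires the surrounding diffusers package.
if __name__ == "__main__":
    enc = SpectrogramNotesEncoder(
        max_length=16, vocab_size=100, d_model=32, dropout_rate=0.1,
        num_layers=2, num_heads=2, d_kv=16, d_ff=64,
        feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 100, (1, 16))
    mask = torch.ones(1, 16, dtype=torch.long)
    out, out_mask = enc(tokens, mask)
    print(out.shape)  # torch.Size([1, 16, 32])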
|
# Copyright 2022 The Music Spectrogram Diffusion Authors.
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ....configuration_utils import ConfigMixin, register_to_config
from ....models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
@register_to_config
def __init__(
self,
max_length: int,
vocab_size: int,
d_model: int,
dropout_rate: float,
num_layers: int,
num_heads: int,
d_kv: int,
d_ff: int,
feed_forward_proj: str,
is_decoder: bool = False,
):
super().__init__()
self.token_embedder = nn.Embedding(vocab_size, d_model)
self.position_encoding = nn.Embedding(max_length, d_model)
self.position_encoding.weight.requires_grad = False
self.dropout_pre = nn.Dropout(p=dropout_rate)
t5config = T5Config(
vocab_size=vocab_size,
d_model=d_model,
num_heads=num_heads,
d_kv=d_kv,
d_ff=d_ff,
dropout_rate=dropout_rate,
feed_forward_proj=feed_forward_proj,
is_decoder=is_decoder,
is_encoder_decoder=False,
)
self.encoders = nn.ModuleList()
for lyr_num in range(num_layers):
lyr = T5Block(t5config)
self.encoders.append(lyr)
self.layer_norm = T5LayerNorm(d_model)
self.dropout_post = nn.Dropout(p=dropout_rate)
def forward(self, encoder_input_tokens, encoder_inputs_mask):
x = self.token_embedder(encoder_input_tokens)
seq_length = encoder_input_tokens.shape[1]
inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
x += self.position_encoding(inputs_positions)
x = self.dropout_pre(x)
# inverted the attention mask
input_shape = encoder_input_tokens.size()
extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
for lyr in self.encoders:
x = lyr(x, extended_attention_mask)[0]
x = self.layer_norm(x)
return self.dropout_post(x), encoder_inputs_mask
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls,
value: list[BaseMemory],
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory) and val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}",
stacklevel=2,
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
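# --- Illustrative usage sketch (not part of the original module) ---
# Combines two ConversationBufferMemory instances under distinct memory keys.
# The import below is an assumption of this sketch, not something this module
# itself requires.
if __name__ == "__main__":
    from langchain.memory import ConversationBufferMemory

    combined = CombinedMemory(
        memories=[
            ConversationBufferMemory(memory_key="chat_history", input_key="input"),
            ConversationBufferMemory(memory_key="recent_history", input_key="input"),
        ]
    )
    combined.save_context({"input": "hi"}, {"output": "hello"})
    print(combined.load_memory_variables({}))  # both memory keys are populated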
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls,
value: list[BaseMemory],
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory) and val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}",
stacklevel=5,
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
|
import asyncio
from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis.asyncio import Redis as AsyncRedis
from redis.asyncio.lock import Lock as AsyncRedisLock
class AsyncRedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
in case the key is not unlocked for a specified duration, to prevent memory leaks.
"""
def __init__(self, redis: "AsyncRedis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "AsyncRedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = asyncio.Lock()
@asynccontextmanager
async def locked(self, key: Any):
lock = await self.acquire(key)
try:
yield
finally:
if (await lock.locked()) and (await lock.owned()):
await lock.release()
async def acquire(self, key: Any) -> "AsyncRedisLock":
"""Acquires and returns a lock with the given key"""
async with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
await lock.acquire()
return lock
async def release(self, key: Any):
if (
(lock := self.locks.get(key))
and (await lock.locked())
and (await lock.owned())
):
await lock.release()
async def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
async with self.locks_lock:
for lock in self.locks.values():
if (await lock.locked()) and (await lock.owned()):
await lock.release()
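# --- Illustrative usage sketch (not part of the original module) ---
# Requires a reachable Redis server; the host/port below are placeholders.
if __name__ == "__main__":
    from redis.asyncio import Redis as AsyncRedis

    async def _demo():
        mutex = AsyncRedisKeyedMutex(AsyncRedis(host="localhost", port=6379))
        async with mutex.locked("job:123"):
            ...  # critical section guarded across processes

    asyncio.run(_demo())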
|
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis import Redis
from redis.lock import Lock as RedisLock
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
in case the key is not unlocked for a specified duration, to prevent memory leaks.
"""
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = Lock()
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
if lock.locked() and lock.owned():
lock.release()
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
lock.acquire()
return lock
def release(self, key: Any):
if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
lock.release()
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.infino_callback import InfinoCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"InfinoCallbackHandler": "langchain_community.callbacks.infino_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"InfinoCallbackHandler",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.infino_callback import InfinoCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"InfinoCallbackHandler": "langchain_community.callbacks.infino_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"InfinoCallbackHandler",
]
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this pipeline step
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this pipeline step
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
def __del__(self) -> None:
if not self._persist:
self._offset2ids.clear()
self._annlite.clear()
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
from typing import Union, Iterable
from ..base.seqlike import BaseSequenceLikeMixin
from ...memory import DocumentArrayInMemory
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
def __del__(self) -> None:
if not self._persist:
self._offset2ids.clear()
self._annlite.clear()
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
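For context, the mixin above is reached through DocumentArray with the annlite storage backend. A hedged usage sketch, assuming docarray<2.0 with the annlite extra installed and a 5-dimensional embedding space:

import numpy as np
from docarray import Document, DocumentArray

# DocumentArray backed by annlite; n_dim must match the embeddings added below.
da = DocumentArray(storage='annlite', config={'n_dim': 5})
da.extend([Document(embedding=np.random.rand(5)) for _ in range(3)])  # extend() above
print(len(da))         # 3
print(da[0].id in da)  # True, via __contains__ above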
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.ops import convert_to_tensor
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
def test_tensor_as_vocab(self):
vocab = convert_to_tensor(["a", "b", "c", "d"])
data = [["a", "c", "d"], ["d", "z", "b"]]
layer = layers.StringLookup(
vocabulary=vocab,
)
output = layer(data)
self.assertAllClose(output, np.array([[1, 3, 4], [4, 0, 2]]))
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.ops import convert_to_tensor
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
def test_tensor_as_vocab(self):
vocab = convert_to_tensor(["a", "b", "c", "d"])
data = [["a", "c", "d"], ["d", "z", "b"]]
layer = layers.StringLookup(
vocabulary=vocab,
)
output = layer(data)
self.assertAllClose(output, np.array([[1, 3, 4], [4, 0, 2]]))
|
"""Prompt display utils."""
from llama_index.core.prompts.mixin import PromptDictType
# define prompt viewing function
def display_prompt_dict(prompts_dict: PromptDictType) -> None:
"""
Display prompt dict.
Args:
prompts_dict: prompt dict
"""
from IPython.display import Markdown, display
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
|
"""Prompt display utils."""
from llama_index.core.prompts.mixin import PromptDictType
# define prompt viewing function
def display_prompt_dict(prompts_dict: PromptDictType) -> None:
"""Display prompt dict.
Args:
prompts_dict: prompt dict
"""
from IPython.display import Markdown, display
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
|
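A hedged usage sketch for the helper above, run inside a notebook; the PromptTemplate import path and the prompt key are illustrative, not taken from the row:

from llama_index.core.prompts import PromptTemplate

# Build a tiny prompt dict by hand just to demonstrate display_prompt_dict.
prompts = {
    "response_synthesizer:text_qa_template": PromptTemplate(
        "Answer the question: {query_str}"
    ),
}
display_prompt_dict(prompts)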
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient tape utilities."""
from tensorflow.python import pywrap_tfe
class Tape(object):
"""Represents a gradient propagation trace."""
__slots__ = ["_tape"]
def __init__(self, tape):
self._tape = tape
def watched_variables(self):
return pywrap_tfe.TFE_Py_TapeWatchedVariables(self._tape)
def push_new_tape(persistent=False, watch_accessed_variables=True):
"""Pushes a new tape onto the tape stack."""
tape = pywrap_tfe.TFE_Py_TapeSetNew(persistent, watch_accessed_variables)
return Tape(tape)
def push_tape(tape):
"""Pushes an existing tape onto the tape stack."""
pywrap_tfe.TFE_Py_TapeSetAdd(tape._tape) # pylint: disable=protected-access
def watch(tape, tensor):
"""Marks this tensor to be watched by the given tape."""
pywrap_tfe.TFE_Py_TapeWatch(tape._tape, tensor) # pylint: disable=protected-access
def default_get_variables(variable):
return [variable]
# Gets a list of changed variables. Can be overridden using
# register_variables_override. An example of overriding is for getting the
# variables within a distributed context.
_variables_override = default_get_variables
def register_watched_variable_resolver(resolver):
"""Registers the resolver to be used to get the list of variables to watch.
Args:
resolver: callable, takes a Variable and returns a list of Variables that
shall be watched.
"""
global _variables_override
assert _variables_override is default_get_variables
_variables_override = resolver
def watch_variable(tape, variable):
"""Marks this variable to be watched by the given tape."""
variables = _variables_override(variable)
for var in variables:
pywrap_tfe.TFE_Py_TapeWatchVariable(tape._tape, var) # pylint: disable=protected-access
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def variable_accessed(variable):
"""Notifies all tapes in the stack that a variable has been accessed.
Args:
variable: variable to be watched.
"""
variables = _variables_override(variable)
for var in variables:
pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def variables_accessed(variables):
"""Notifies all tapes in the stack that variables have been accessed.
Only trainable variables are marked as accessed.
Args:
variables: iterable of variables to mark as accessed.
"""
accessed = []
for variable in variables:
if variable.trainable:
accessed.extend(_variables_override(variable))
for var in accessed:
pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def pop_tape(tape):
"""Pops the given tape in the stack."""
pywrap_tfe.TFE_Py_TapeSetRemove(tape._tape) # pylint: disable=protected-access
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient tape utilities."""
from tensorflow.python import pywrap_tfe
class Tape(object):
"""Represents a gradient propagation trace."""
__slots__ = ["_tape"]
def __init__(self, tape):
self._tape = tape
def watched_variables(self):
return pywrap_tfe.TFE_Py_TapeWatchedVariables(self._tape)
def push_new_tape(persistent=False, watch_accessed_variables=True):
"""Pushes a new tape onto the tape stack."""
tape = pywrap_tfe.TFE_Py_TapeSetNew(persistent, watch_accessed_variables)
return Tape(tape)
def push_tape(tape):
"""Pushes an existing tape onto the tape stack."""
pywrap_tfe.TFE_Py_TapeSetAdd(tape._tape) # pylint: disable=protected-access
def watch(tape, tensor):
"""Marks this tensor to be watched by the given tape."""
pywrap_tfe.TFE_Py_TapeWatch(tape._tape, tensor) # pylint: disable=protected-access
def default_get_variables(variable):
return [variable]
# Gets a list of changed variables. Can be overridden using
# register_variables_override. An example of overriding is for getting the
# variables within a distributed context.
_variables_override = default_get_variables
def register_watched_variable_resolver(resolver):
"""Registers the resolver to be used to get the list of variables to watch.
Args:
resolver: callable, takes a Variable and returns a list of Variables that
shall be watched.
"""
global _variables_override
assert _variables_override is default_get_variables
_variables_override = resolver
def watch_variable(tape, variable):
"""Marks this variable to be watched by the given tape."""
variables = _variables_override(variable)
for var in variables:
pywrap_tfe.TFE_Py_TapeWatchVariable(tape._tape, var) # pylint: disable=protected-access
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def variable_accessed(variable):
"""Notifies all tapes in the stack that a variable has been accessed.
Args:
variable: variable to be watched.
"""
variables = _variables_override(variable)
for var in variables:
pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def variables_accessed(variables):
"""Notifies all tapes in the stack that variables have been accessed.
Only trainable variables are marked as accessed.
Args:
variables: iterable of variables to mark as accessed.
"""
accessed = []
for variable in variables:
if variable.trainable:
accessed.extend(_variables_override(variable))
for var in accessed:
pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
def pop_tape(tape):
"""Pops the given tape in the stack."""
pywrap_tfe.TFE_Py_TapeSetRemove(tape._tape) # pylint: disable=protected-access
|
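The tape utilities above back the public tf.GradientTape API; a hedged sketch of the user-facing behaviour they enable, assuming TensorFlow is installed:

import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:  # pushes/pops an internal Tape via the utilities above
    y = x * x
print(tape.gradient(y, x))  # tf.Tensor(6.0, shape=(), dtype=float32)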
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder, DDQTransformerDecoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'DDQTransformerDecoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client
import pytest_asyncio
@pytest_asyncio.fixture
async def vector_store() -> QdrantVectorStore:
client = qdrant_client.QdrantClient(":memory:")
aclient = qdrant_client.AsyncQdrantClient(":memory:")
vector_store = QdrantVectorStore("test", client=client, aclient=aclient)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
embedding=[1.0, 0.0],
metadata={"some_key": 1},
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
embedding=[0.0, 1.0],
metadata={"some_key": 2},
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
embedding=[1.0, 1.0],
metadata={"some_key": "3"},
),
]
vector_store.add(nodes)
# in-memory client does not share data between instances
await vector_store.async_add(nodes)
return vector_store
@pytest_asyncio.fixture
async def hybrid_vector_store() -> QdrantVectorStore:
client = qdrant_client.QdrantClient(":memory:")
aclient = qdrant_client.AsyncQdrantClient(":memory:")
vector_store = QdrantVectorStore(
"test",
client=client,
aclient=aclient,
enable_hybrid=True,
fastembed_sparse_model="Qdrant/bm25",
)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
embedding=[1.0, 0.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
embedding=[0.0, 1.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
embedding=[1.0, 1.0],
),
]
vector_store.add(nodes)
# in-memory client does not share data between instances
await vector_store.async_add(nodes)
return vector_store
|
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client
import pytest_asyncio
@pytest_asyncio.fixture
async def vector_store() -> QdrantVectorStore:
client = qdrant_client.QdrantClient(":memory:")
aclient = qdrant_client.AsyncQdrantClient(":memory:")
vector_store = QdrantVectorStore("test", client=client, aclient=aclient)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
embedding=[1.0, 0.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
embedding=[0.0, 1.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
embedding=[1.0, 1.0],
),
]
vector_store.add(nodes)
# in-memory client does not share data between instances
await vector_store.async_add(nodes)
return vector_store
@pytest_asyncio.fixture
async def hybrid_vector_store() -> QdrantVectorStore:
client = qdrant_client.QdrantClient(":memory:")
aclient = qdrant_client.AsyncQdrantClient(":memory:")
vector_store = QdrantVectorStore(
"test",
client=client,
aclient=aclient,
enable_hybrid=True,
fastembed_sparse_model="Qdrant/bm25",
)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
embedding=[1.0, 0.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
embedding=[0.0, 1.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
embedding=[1.0, 1.0],
),
]
vector_store.add(nodes)
# in-memory client does not share data between instances
await vector_store.async_add(nodes)
return vector_store
|
"""Copyright 2024, XGBoost contributors"""
import dask
import pytest
from distributed import Client
from xgboost import testing as tm
from xgboost.testing import dask as dtm
pytestmark = [
pytest.mark.skipif(**tm.no_dask()),
pytest.mark.skipif(**tm.no_dask_cuda()),
tm.timeout(120),
]
@pytest.mark.filterwarnings("error")
def test_no_group_split(local_cuda_client: Client) -> None:
with dask.config.set(
{
"array.backend": "cupy",
"dataframe.backend": "cudf",
}
):
dtm.check_no_group_split(local_cuda_client, "cuda")
|
"""Copyright 2024, XGBoost contributors"""
import dask
import pytest
from distributed import Client
from xgboost.testing import dask as dtm
@pytest.mark.filterwarnings("error")
def test_no_group_split(local_cuda_client: Client) -> None:
with dask.config.set(
{
"array.backend": "cupy",
"dataframe.backend": "cudf",
}
):
dtm.check_no_group_split(local_cuda_client, "cuda")
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .memory_profiler_hook import MemoryProfilerHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook', 'SetEpochInfoHook', 'MemoryProfilerHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook', 'SetEpochInfoHook'
]
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from docarray.typing.url.url_3d.mesh_url import Mesh3DLoadResult
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
vertices, faces = url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(vertices, NdArray)
assert isinstance(faces, np.ndarray)
assert isinstance(faces, NdArray)
assert vertices.shape[1] == 3
assert faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', [f for f in Mesh3DLoadResult._fields])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
vertices, faces = url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
assert vertices.shape[1] == 3
assert faces.shape[1] == 3
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
Returns
-------
lib_path
        List of all found library paths to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
# normal, after installation `lib` is copied into Python package tree.
os.path.join(curr_path, "lib"),
# editable installation, no copying is performed.
os.path.join(curr_path, os.path.pardir, os.path.pardir, "lib"),
# use libxgboost from a system prefix, if available. This should be the last
# option.
os.path.join(sys.base_prefix, "lib"),
]
if sys.platform == "win32":
# On Windows, Conda may install libs in different paths
dll_path.extend(
[
os.path.join(sys.base_prefix, "bin"),
os.path.join(sys.base_prefix, "Library"),
os.path.join(sys.base_prefix, "Library", "bin"),
os.path.join(sys.base_prefix, "Library", "lib"),
]
)
dll_path = [os.path.join(p, "xgboost.dll") for p in dll_path]
elif sys.platform.startswith(("linux", "freebsd", "emscripten")):
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
elif sys.platform == "darwin":
dll_path = [os.path.join(p, "libxgboost.dylib") for p in dll_path]
elif sys.platform == "cygwin":
dll_path = [os.path.join(p, "cygxgboost.dll") for p in dll_path]
if platform.system() == "OS400":
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
# XGBOOST_BUILD_DOC is defined by sphinx conf.
if not lib_path and not os.environ.get("XGBOOST_BUILD_DOC", False):
link = "https://xgboost.readthedocs.io/en/stable/install.html"
msg = (
"Cannot find XGBoost Library in the candidate path. "
+ "List of candidates:\n- "
+ ("\n- ".join(dll_path))
+ "\nXGBoost Python package path: "
+ curr_path
+ "\nsys.base_prefix: "
+ sys.base_prefix
+ "\nSee: "
+ link
+ " for installing XGBoost."
)
raise XGBoostLibraryNotFound(msg)
return lib_path
|
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
Returns
-------
lib_path
        List of all found library paths to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
# normal, after installation `lib` is copied into Python package tree.
os.path.join(curr_path, "lib"),
# editable installation, no copying is performed.
os.path.join(curr_path, os.path.pardir, os.path.pardir, "lib"),
# use libxgboost from a system prefix, if available. This should be the last
# option.
os.path.join(sys.base_prefix, "lib"),
]
if sys.platform == "win32":
if platform.architecture()[0] == "64bit":
dll_path.append(os.path.join(curr_path, "../../windows/x64/Release/"))
            # hack for pip installation when copying the whole parent source
            # directory here
dll_path.append(os.path.join(curr_path, "./windows/x64/Release/"))
else:
dll_path.append(os.path.join(curr_path, "../../windows/Release/"))
            # hack for pip installation when copying the whole parent source
            # directory here
dll_path.append(os.path.join(curr_path, "./windows/Release/"))
dll_path = [os.path.join(p, "xgboost.dll") for p in dll_path]
elif sys.platform.startswith(("linux", "freebsd", "emscripten")):
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
elif sys.platform == "darwin":
dll_path = [os.path.join(p, "libxgboost.dylib") for p in dll_path]
elif sys.platform == "cygwin":
dll_path = [os.path.join(p, "cygxgboost.dll") for p in dll_path]
if platform.system() == "OS400":
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
# XGBOOST_BUILD_DOC is defined by sphinx conf.
if not lib_path and not os.environ.get("XGBOOST_BUILD_DOC", False):
link = "https://xgboost.readthedocs.io/en/stable/install.html"
msg = (
"Cannot find XGBoost Library in the candidate path. "
+ "List of candidates:\n- "
+ ("\n- ".join(dll_path))
+ "\nXGBoost Python package path: "
+ curr_path
+ "\nsys.base_prefix: "
+ sys.base_prefix
+ "\nSee: "
+ link
+ " for installing XGBoost."
)
raise XGBoostLibraryNotFound(msg)
return lib_path
|
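A hedged usage sketch for the helper above; the module path xgboost.libpath is an assumption about where this file lives inside the installed package:

# Locate the compiled XGBoost library, or report a readable error.
from xgboost.libpath import XGBoostLibraryNotFound, find_lib_path  # path assumed

try:
    lib_paths = find_lib_path()
    print("Found native library at:", lib_paths[0])
except XGBoostLibraryNotFound as err:
    print("XGBoost native library missing:", err)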
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size=(416, 416), pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
resume_from = None
interval = 10
# Execute in the order of insertion when the priority is the same.
# The smaller the value, the higher the priority
custom_hooks = [
dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
dict(
type='SyncRandomSizeHook',
ratio_range=(10, 20),
img_scale=img_scale,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=15,
interval=interval,
priority=48),
dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(interval=interval, metric='bbox')
|
from langchain_core.agents import AgentAction
from langchain.agents.conversational.output_parser import ConvoOutputParser
def test_normal_output_parsing() -> None:
_test_convo_output(
"""
Action: my_action
Action Input: my action input
""",
"my_action",
"my action input",
)
def test_multiline_output_parsing() -> None:
_test_convo_output(
"""
Thought: Do I need to use a tool? Yes
Action: evaluate_code
Action Input: Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201
```
""",
"evaluate_code",
"""
Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201
```""".lstrip(),
)
def _test_convo_output(text: str, expected_tool: str, expected_tool_input: str) -> None:
result = ConvoOutputParser().parse(text.strip())
assert isinstance(result, AgentAction)
assert result.tool == expected_tool
assert result.tool_input == expected_tool_input
|
from langchain_core.agents import AgentAction
from langchain.agents.conversational.output_parser import ConvoOutputParser
def test_normal_output_parsing() -> None:
_test_convo_output(
"""
Action: my_action
Action Input: my action input
""",
"my_action",
"my action input",
)
def test_multiline_output_parsing() -> None:
_test_convo_output(
"""
Thought: Do I need to use a tool? Yes
Action: evaluate_code
Action Input: Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201
```
""",
"evaluate_code",
"""
Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201
```""".lstrip(),
)
def _test_convo_output(
input: str, expected_tool: str, expected_tool_input: str
) -> None:
result = ConvoOutputParser().parse(input.strip())
assert isinstance(result, AgentAction)
assert result.tool == expected_tool
assert result.tool_input == expected_tool_input
|
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_global_policy,
)
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_global_policy,
)
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
|
_base_ = './vfnet_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluation, "primary_metric"):
scores.append(evaluation[evaluation.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
from typing import Iterable
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
scores = []
for evaluator in self.evaluators:
scores.append(evaluator(model, output_path, epoch, steps))
return self.main_score_function(scores)
|
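A hedged usage sketch for the evaluator above; ConstantEvaluator is a toy stand-in for real evaluators such as BinaryClassificationEvaluator, and averaging via main_score_function is just one possible choice. With the older API in this row the call returns a float, while the newer variant returns a dict keyed by 'sequential_score'.

from statistics import fmean
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import SentenceEvaluator, SequentialEvaluator

class ConstantEvaluator(SentenceEvaluator):
    """Toy evaluator returning a fixed score, only to exercise SequentialEvaluator."""

    def __init__(self, score: float) -> None:
        super().__init__()
        self.score = score

    def __call__(self, model, output_path=None, epoch=-1, steps=-1) -> float:
        return self.score

model = SentenceTransformer("all-MiniLM-L6-v2")  # model choice is illustrative
seq_evaluator = SequentialEvaluator(
    [ConstantEvaluator(0.5), ConstantEvaluator(0.7)],
    main_score_function=fmean,  # average instead of the default "last score"
)
print(seq_evaluator(model))  # 0.6 with the older float-returning API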
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
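# --- Usage sketch (commented out so it is not executed when this config file
# is parsed by mmengine). Assumes the file is saved as
# configs/_base_/datasets/coco_detection.py in an MMDetection 3.x checkout and
# that data/coco/ exists locally.
# from mmengine.config import Config
# from mmdet.registry import DATASETS
# from mmdet.utils import register_all_modules
#
# register_all_modules()
# cfg = Config.fromfile('configs/_base_/datasets/coco_detection.py')
# print(cfg.train_dataloader.batch_size)  # 2
# print(cfg.val_evaluator['type'])  # 'CocoMetric'
# train_dataset = DATASETS.build(cfg.train_dataloader.dataset)
# print(len(train_dataset))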
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.content_blocks import (
convert_to_openai_data_block,
convert_to_openai_image_block,
is_data_content_block,
)
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = (
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_openai_data_block",
"convert_to_openai_image_block",
"convert_to_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
)
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_data_block": "content_blocks",
"convert_to_openai_image_block": "content_blocks",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"is_data_content_block": "content_blocks",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
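# --- Usage sketch (standalone, consumer-side) --------------------------------
# Illustrates the lazy PEP 562 imports above: attribute access on the package
# triggers __getattr__, which resolves the name from its submodule and caches
# it in globals(). The message contents below are made up.
if __name__ == "__main__":
    from langchain_core import messages

    human = messages.HumanMessage(content="What is a sparse embedding?")
    ai = messages.AIMessage(content="An embedding where most dimensions are zero.")

    # After the first access the names are cached as ordinary module attributes.
    assert "HumanMessage" in vars(messages)

    print(messages.get_buffer_string([human, ai]))
    # Human: What is a sparse embedding?
    # AI: An embedding where most dimensions are zero.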
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.content_blocks import (
convert_to_openai_image_block,
is_data_content_block,
)
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = (
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_openai_image_block",
"convert_to_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
)
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_image_block": "content_blocks",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"is_data_content_block": "content_blocks",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.66666793823242, 'sparsity_ratio': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode() returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.get_sparsity_stats(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats['row_sparsity_mean']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode() returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.get_sparsity_stats(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats_limited['row_sparsity_mean']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 32.0, 'row_sparsity_mean': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4),
'proposals_scores': rng.rand(2, )
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4),
'proposals_scores': rng.rand(2, )
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 2)
self.assertEqual(len(results['data_samples'].ignored_instances), 1)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
torch.Tensor)
self.assertIsInstance(results['data_samples'].proposals.scores,
torch.Tensor)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 3)
self.assertEqual(len(results['data_samples'].ignored_instances), 0)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
torch.Tensor)
self.assertIsInstance(results['data_samples'].proposals.scores,
torch.Tensor)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
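# --- Minimal standalone sketch (not part of the test suite) ------------------
# Packs a toy results dict with PackDetInputs outside of unittest, assuming an
# MMDetection 3.x environment; the random arrays stand in for a real pipeline's
# output and 'demo.jpg' is a placeholder path (the transform only records it).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    results = {
        'img_id': 1,
        'img_path': 'demo.jpg',
        'ori_shape': (300, 400),
        'img_shape': (600, 800),
        'scale_factor': 2.0,
        'flip': False,
        'img': rng.rand(300, 400),
        'gt_bboxes_labels': rng.randint(0, 80, size=(3, )),
        'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
    }
    packed = PackDetInputs(meta_keys=('img_id', 'img_path', 'ori_shape',
                                      'scale_factor', 'flip'))(results)
    print(packed['inputs'].shape)  # the image packed as a (C, H, W) tensor
    print(packed['data_samples'].metainfo)  # only the selected meta keys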
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=np.bool),
'proposals': rng.rand(2, 4),
'proposals_scores': rng.rand(2, )
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4),
'proposals_scores': rng.rand(2, )
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 2)
self.assertEqual(len(results['data_samples'].ignored_instances), 1)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
torch.Tensor)
self.assertIsInstance(results['data_samples'].proposals.scores,
torch.Tensor)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 3)
self.assertEqual(len(results['data_samples'].ignored_instances), 0)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
torch.Tensor)
self.assertIsInstance(results['data_samples'].proposals.scores,
torch.Tensor)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
from tqdm import tqdm
from typing import Any, Sequence
from llama_index.core.schema import TransformComponent, BaseNode, NodeRelationship
from llama_index.core.graph_stores.types import Relation, KG_NODES_KEY, KG_RELATIONS_KEY
def get_node_rel_string(relationship: NodeRelationship) -> str:
return str(relationship).split(".")[-1]
class ImplicitPathExtractor(TransformComponent):
"""
Extract edges from node relationships.
Uses `node.relationships` to extract relations between nodes.
"""
@classmethod
def class_name(cls) -> str:
return "ImplicitPathExtractor"
def __call__(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> Sequence[BaseNode]:
"""Extract edges from node relationships."""
if show_progress:
nodes = tqdm(nodes, desc="Extracting implicit paths")
for node in nodes:
existing_relations = node.metadata.pop(KG_RELATIONS_KEY, [])
existing_nodes = node.metadata.pop(KG_NODES_KEY, [])
edges = []
metadata = node.metadata.copy()
if node.source_node:
edges.append(
Relation(
target_id=node.source_node.node_id,
source_id=node.node_id,
label=get_node_rel_string(NodeRelationship.SOURCE),
properties=metadata,
)
)
if node.parent_node:
edges.append(
Relation(
target_id=node.parent_node.node_id,
source_id=node.node_id,
label=get_node_rel_string(NodeRelationship.PARENT),
properties=metadata,
)
)
if node.prev_node:
edges.append(
Relation(
target_id=node.prev_node.node_id,
source_id=node.node_id,
label=get_node_rel_string(NodeRelationship.PREVIOUS),
properties=metadata,
)
)
if node.next_node:
edges.append(
Relation(
source_id=node.node_id,
target_id=node.next_node.node_id,
label=get_node_rel_string(NodeRelationship.NEXT),
properties=metadata,
)
)
if node.child_nodes:
for child_node in node.child_nodes:
edges.append(
Relation(
source_id=node.node_id,
target_id=child_node.node_id,
label=get_node_rel_string(NodeRelationship.CHILD),
properties=metadata,
)
)
existing_relations.extend(edges)
node.metadata[KG_RELATIONS_KEY] = existing_relations
node.metadata[KG_NODES_KEY] = existing_nodes
return nodes
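# --- Usage sketch -------------------------------------------------------------
# A small, hypothetical example: two consecutive TextNodes linked through
# NEXT/PREVIOUS relationships are run through ImplicitPathExtractor, and the
# extracted Relation objects land under KG_RELATIONS_KEY in each node's metadata.
if __name__ == "__main__":
    from llama_index.core.schema import RelatedNodeInfo, TextNode

    first = TextNode(id_="n1", text="Paris is the capital of France.")
    second = TextNode(id_="n2", text="It is known for the Eiffel Tower.")
    first.relationships[NodeRelationship.NEXT] = RelatedNodeInfo(node_id=second.node_id)
    second.relationships[NodeRelationship.PREVIOUS] = RelatedNodeInfo(node_id=first.node_id)

    extractor = ImplicitPathExtractor()
    for node in extractor([first, second]):
        for relation in node.metadata[KG_RELATIONS_KEY]:
            print(relation.source_id, relation.label, relation.target_id)
    # expected along the lines of:
    #   n1 NEXT n2
    #   n2 PREVIOUS n1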
|
from tqdm import tqdm
from typing import Any, Sequence
from llama_index.core.schema import TransformComponent, BaseNode, NodeRelationship
from llama_index.core.graph_stores.types import Relation, KG_NODES_KEY, KG_RELATIONS_KEY
def get_node_rel_string(relationship: NodeRelationship) -> str:
return str(relationship).split(".")[-1]
class ImplicitPathExtractor(TransformComponent):
"""Extract edges from node relationships.
Uses `node.relationships` to extract relations between nodes.
"""
@classmethod
def class_name(cls) -> str:
return "ImplicitPathExtractor"
def __call__(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> Sequence[BaseNode]:
"""Extract edges from node relationships."""
if show_progress:
nodes = tqdm(nodes, desc="Extracting implicit paths")
for node in nodes:
existing_relations = node.metadata.pop(KG_RELATIONS_KEY, [])
existing_nodes = node.metadata.pop(KG_NODES_KEY, [])
edges = []
metadata = node.metadata.copy()
if node.source_node:
edges.append(
Relation(
target_id=node.source_node.node_id,
source_id=node.node_id,
label=get_node_rel_string(NodeRelationship.SOURCE),
properties=metadata,
)
)
if node.parent_node:
edges.append(
Relation(
target_id=node.parent_node.node_id,
source_id=node.node_id,
label=get_node_rel_string(NodeRelationship.PARENT),
properties=metadata,
)
)
if node.prev_node:
edges.append(
Relation(
target_id=node.prev_node.node_id,
source_id=node.node_id,
label=get_node_rel_string(NodeRelationship.PREVIOUS),
properties=metadata,
)
)
if node.next_node:
edges.append(
Relation(
source_id=node.node_id,
target_id=node.next_node.node_id,
label=get_node_rel_string(NodeRelationship.NEXT),
properties=metadata,
)
)
if node.child_nodes:
for child_node in node.child_nodes:
edges.append(
Relation(
source_id=node.node_id,
target_id=child_node.node_id,
label=get_node_rel_string(NodeRelationship.CHILD),
properties=metadata,
)
)
existing_relations.extend(edges)
node.metadata[KG_RELATIONS_KEY] = existing_relations
node.metadata[KG_NODES_KEY] = existing_nodes
return nodes
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import CassandraChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CassandraChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CassandraChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import CassandraChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CassandraChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CassandraChatMessageHistory",
]
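# --- Pattern sketch (illustrative only) ---------------------------------------
# The same create_importer() call can cover any number of relocated names; the
# lookup below uses made-up module and class names purely to show the shape of
# the table. Accessing such a name through __getattr__ would emit a deprecation
# warning and then import it from the target module.
if __name__ == "__main__":
    _example_importer = create_importer(
        __package__,
        deprecated_lookups={
            "SomeChatMessageHistory": "some_hypothetical_package.chat_message_histories",
        },
    )
    # _example_importer("SomeChatMessageHistory") would warn and then fail here,
    # because the hypothetical target package does not exist.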
|
import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box import BoxReaderBase
from llama_index.readers.box.BoxAPI.box_api import (
get_box_files_details,
get_box_folder_files_details,
get_files_ai_extract_data,
box_check_connection,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
from box_sdk_gen import (
BoxClient,
File,
)
logger = logging.getLogger(__name__)
class BoxReaderAIExtract(BoxReaderBase):
"""
A reader class for loading data from Box files using Box AI Extract.
This class inherits from the `BaseReader` class and specializes in
processing data from Box files using Box AI Extract. It utilizes the
provided BoxClient object to interact with the Box API and extracts
data based on a specified AI prompt.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
_box_client: BoxClient
@classmethod
def class_name(cls) -> str:
return "BoxReaderAIExtract"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
def load_data(
self,
ai_prompt: str,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
) -> List[Document]:
"""
Extracts data from Box files using Box AI and creates Document objects.
This method utilizes the Box AI Extract functionality to extract data
based on the provided AI prompt from the specified Box files. It then
creates Document objects containing the extracted data along with
file metadata.
Args:
ai_prompt (str): The AI prompt that specifies what data to extract
from the files.
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract data from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract data from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts data from sub-folders within the specified folder.
Defaults to False.
Returns:
List[Document]: A list of Document objects containing the extracted
data and file metadata.
"""
# check if the box client is authenticated
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get payload information
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_files_ai_extract_data(
box_client=self._box_client,
box_files=box_files,
ai_prompt=ai_prompt,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.ai_response if file.ai_response else ""
doc.metadata["ai_prompt"] = file.ai_prompt
doc.metadata["ai_response"] = file.ai_response
docs.append(doc)
return docs
def load_resource(self, box_file_id: str, ai_prompt: str) -> List[Document]:
"""
Load data from a specific resource.
Args:
            box_file_id (str): The ID of the Box file to load.
            ai_prompt (str): The AI prompt that specifies what data to extract.
Returns:
List[Document]: A list of documents loaded from the resource.
"""
return self.load_data(file_ids=[box_file_id], ai_prompt=ai_prompt)
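# --- Usage sketch --------------------------------------------------------------
# Hypothetical example of driving the reader above. The developer token and the
# file ID "1234567890" are placeholders; any box_sdk_gen authentication method
# that yields a BoxClient works, and the AI prompt is free-form text.
if __name__ == "__main__":
    from box_sdk_gen import BoxDeveloperTokenAuth

    auth = BoxDeveloperTokenAuth(token="YOUR_DEVELOPER_TOKEN")
    client = BoxClient(auth=auth)

    reader = BoxReaderAIExtract(box_client=client)
    documents = reader.load_data(
        ai_prompt='{"doc_type", "date", "total", "vendor"}',
        file_ids=["1234567890"],
    )
    for doc in documents:
        print(doc.metadata["ai_response"])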
|
import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box import BoxReaderBase
from llama_index.readers.box.BoxAPI.box_api import (
get_box_files_details,
get_box_folder_files_details,
get_files_ai_extract_data,
box_check_connection,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
from box_sdk_gen import (
BoxClient,
File,
)
logger = logging.getLogger(__name__)
class BoxReaderAIExtract(BoxReaderBase):
"""
A reader class for loading data from Box files using Box AI Extract.
This class inherits from the `BaseReader` class and specializes in
processing data from Box files using Box AI Extract. It utilizes the
provided BoxClient object to interact with the Box API and extracts
data based on a specified AI prompt.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
_box_client: BoxClient
@classmethod
def class_name(cls) -> str:
return "BoxReaderAIExtract"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
def load_data(
self,
ai_prompt: str,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
) -> List[Document]:
"""
Extracts data from Box files using Box AI and creates Document objects.
This method utilizes the Box AI Extract functionality to extract data
based on the provided AI prompt from the specified Box files. It then
creates Document objects containing the extracted data along with
file metadata.
Args:
ai_prompt (str): The AI prompt that specifies what data to extract
from the files.
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract data from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract data from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts data from sub-folders within the specified folder.
Defaults to False.
Returns:
List[Document]: A list of Document objects containing the extracted
data and file metadata.
"""
# check if the box client is authenticated
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get payload information
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_files_ai_extract_data(
box_client=self._box_client,
box_files=box_files,
ai_prompt=ai_prompt,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.ai_response if file.ai_response else ""
doc.metadata["ai_prompt"] = file.ai_prompt
doc.metadata["ai_response"] = file.ai_response
docs.append(doc)
return docs
def load_resource(self, box_file_id: str, ai_prompt: str) -> List[Document]:
"""
Load data from a specific resource.
Args:
            box_file_id (str): The ID of the Box file to load.
            ai_prompt (str): The AI prompt that specifies what data to extract.
Returns:
List[Document]: A list of documents loaded from the resource.
"""
return self.load_data(file_ids=[box_file_id], ai_prompt=ai_prompt)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the
    image into grids and encodes bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
    cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
    bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
super(BaseBBoxCoder, self).__init__()
self.eps = eps
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor): Basic boxes, e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with shape (..., 4).
stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
return decoded_bboxes
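# --- Usage sketch (assumes an MMDetection environment) ------------------------
# Toy round trip through the coder: encode one anchor/ground-truth pair, then
# decode the deltas back. The boxes and the stride are made-up numbers.
if __name__ == '__main__':
    coder = YOLOBBoxCoder(eps=1e-6)
    anchors = torch.tensor([[0., 0., 32., 32.]])
    gt = torch.tensor([[2., 4., 30., 28.]])
    stride = 32

    deltas = coder.encode(anchors, gt, stride)
    print(deltas)  # (cx, cy, dw, dh) regression targets

    restored = coder.decode(anchors, deltas, stride)
    print(restored)  # recovers the ground-truth box [2., 4., 30., 28.]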
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.registry import TASK_UTILS
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the
    image into grids and encodes bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
    cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
    bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
super(BaseBBoxCoder, self).__init__()
self.eps = eps
@mmcv.jit(coderize=True)
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
@mmcv.jit(coderize=True)
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor): Basic boxes, e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with shape (..., 4).
stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
return decoded_bboxes
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
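# --- Usage sketch (commented out so it is not executed when this config is
# parsed). The file path is a placeholder; it assumes the config lives under
# configs/seesaw_loss/ in an MMDetection 3.x checkout.
# from mmengine.config import Config
#
# cfg = Config.fromfile('configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_2x_lvis-v1.py')
# print(cfg.model.roi_head.bbox_head.loss_cls['type'])  # 'SeesawLoss'
# print(cfg.model.roi_head.bbox_head.cls_predictor_cfg)  # NormedLinear classifier head
# print(cfg.train_cfg.val_interval)  # 24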
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
evaluation = dict(interval=12, metric=['bbox', 'segm'])
|