input (string, 33–5k chars) | output (string, 32–5k chars)
---|---|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .optimizers import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .scheduler import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
from .visualization import * # noqa: F401, F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .optimizers import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
from .visualization import * # noqa: F401, F403
|
from __future__ import annotations
from typing import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from __future__ import annotations
from typing import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
__version__ = '0.1.0'
from docarray.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
__version__ = '0.1.0'
from docarray.array.array import DocumentArray
from docarray.document.document import BaseDocument
from docarray.predefined_document import Audio, Image, Mesh3D, PointCloud3D, Text
__all__ = [
'BaseDocument',
'DocumentArray',
'Image',
'Audio',
'Text',
'Mesh3D',
'PointCloud3D',
]
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray.typing import ImageBytes, ImageNdArray, ImageTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.image import ImageTensorFlowTensor
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(ImageTorchTensor, torch.zeros((224, 224, 3))),
(ImageNdArray, np.zeros((224, 224, 3))),
],
)
def test_save_image_tensor_to_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(cls_tensor, tensor)
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_image_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(ImageTensorFlowTensor, tf.zeros((224, 224, 3)))
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'image_tensor',
[
parse_obj_as(ImageTorchTensor, torch.zeros(224, 224, 3)),
parse_obj_as(ImageNdArray, np.zeros((224, 224, 3))),
],
)
def test_save_image_tensor_to_bytes(image_tensor):
b = image_tensor.to_bytes()
    assert isinstance(b, bytes)
    assert isinstance(b, ImageBytes)
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray.typing import ImageNdArray, ImageTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.image import ImageTensorFlowTensor
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(ImageTorchTensor, torch.zeros((224, 224, 3))),
(ImageNdArray, np.zeros((224, 224, 3))),
],
)
def test_save_image_tensor_to_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(cls_tensor, tensor)
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_image_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(ImageTensorFlowTensor, tf.zeros((224, 224, 3)))
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSORS, LOOPS,
METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'LOG_PROCESSORS', 'DefaultScope', 'traverse_registry_tree',
'count_registered_modules'
]
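# Hedged usage sketch (standard Registry pattern; `MyDetector` and its config
# values are placeholders, not part of this file):
#
#     @MODELS.register_module()
#     class MyDetector:
#         def __init__(self, depth: int = 50): ...
#
#     model = build_from_cfg(dict(type='MyDetector', depth=101), MODELS)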
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSOR, LOOPS,
METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'LOG_PROCESSOR', 'DefaultScope', 'traverse_registry_tree',
'count_registered_modules'
]
|
from typing import Optional
import torch
__all__ = [
"version",
"is_available",
"get_max_alg_id",
]
try:
from torch._C import _cusparselt
except ImportError:
_cusparselt = None # type: ignore[assignment]
__cusparselt_version: Optional[int] = None
__MAX_ALG_ID: Optional[int] = None
if _cusparselt is not None:
def _init() -> bool:
global __cusparselt_version
global __MAX_ALG_ID
if __cusparselt_version is None:
__cusparselt_version = _cusparselt.getVersionInt()
if __cusparselt_version == 400:
__MAX_ALG_ID = 4
elif __cusparselt_version == 502:
__MAX_ALG_ID = 5
elif __cusparselt_version == 602:
__MAX_ALG_ID = 37
return True
else:
def _init() -> bool:
return False
def version() -> Optional[int]:
"""Return the version of cuSPARSELt"""
if not _init():
return None
return __cusparselt_version
def is_available() -> bool:
r"""Return a bool indicating if cuSPARSELt is currently available."""
return torch._C._has_cusparselt
def get_max_alg_id() -> Optional[int]:
if not _init():
return None
return __MAX_ALG_ID
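# Hedged usage sketch (assumes this module is importable as
# torch.backends.cusparselt, which may vary by PyTorch build):
#
#     import torch.backends.cusparselt as cusparselt
#     if cusparselt.is_available():
#         print(cusparselt.version())         # e.g. 502
#         print(cusparselt.get_max_alg_id())  # e.g. 5 for version 502
#     # version() and get_max_alg_id() return None when _init() fails.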
|
# mypy: allow-untyped-defs
from typing import Optional
import torch
__all__ = [
"version",
"is_available",
"get_max_alg_id",
]
try:
from torch._C import _cusparselt
except ImportError:
_cusparselt = None # type: ignore[assignment]
__cusparselt_version: Optional[int] = None
__MAX_ALG_ID: Optional[int] = None
if _cusparselt is not None:
def _init():
global __cusparselt_version
global __MAX_ALG_ID
if __cusparselt_version is None:
__cusparselt_version = _cusparselt.getVersionInt()
if __cusparselt_version == 400:
__MAX_ALG_ID = 4
elif __cusparselt_version == 502:
__MAX_ALG_ID = 5
elif __cusparselt_version == 602:
__MAX_ALG_ID = 37
return True
else:
def _init():
return False
def version() -> Optional[int]:
"""Return the version of cuSPARSELt"""
if not _init():
return None
return __cusparselt_version
def is_available() -> bool:
r"""Return a bool indicating if cuSPARSELt is currently available."""
return torch._C._has_cusparselt
def get_max_alg_id() -> Optional[int]:
if not _init():
return None
return __MAX_ALG_ID
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
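# Hedged usage sketch (internal API; the generator below is a placeholder):
#
#     def gen():
#         for i in range(3):
#             yield {"id": i, "text": f"example {i}"}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()                   # map-style
#     ids = GeneratorDatasetInputStream(generator=gen, streaming=True).read()  # iterable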
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
try_from_hf_gcs=False,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..providers import ProviderName
from ._base import BaseWebhooksManager
_WEBHOOK_MANAGERS: dict["ProviderName", type["BaseWebhooksManager"]] = {}
# --8<-- [start:load_webhook_managers]
def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]:
if _WEBHOOK_MANAGERS:
return _WEBHOOK_MANAGERS
from .compass import CompassWebhookManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
_WEBHOOK_MANAGERS.update(
{
handler.PROVIDER_NAME: handler
for handler in [
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
]
}
)
return _WEBHOOK_MANAGERS
# --8<-- [end:load_webhook_managers]
def get_webhook_manager(provider_name: "ProviderName") -> "BaseWebhooksManager":
return load_webhook_managers()[provider_name]()
def supports_webhooks(provider_name: "ProviderName") -> bool:
return provider_name in load_webhook_managers()
__all__ = ["get_webhook_manager", "supports_webhooks"]
|
from typing import TYPE_CHECKING
from .compass import CompassWebhookManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
if TYPE_CHECKING:
from ..providers import ProviderName
from ._base import BaseWebhooksManager
# --8<-- [start:WEBHOOK_MANAGERS_BY_NAME]
WEBHOOK_MANAGERS_BY_NAME: dict["ProviderName", type["BaseWebhooksManager"]] = {
handler.PROVIDER_NAME: handler
for handler in [
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
]
}
# --8<-- [end:WEBHOOK_MANAGERS_BY_NAME]
__all__ = ["WEBHOOK_MANAGERS_BY_NAME"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.6.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
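# Hedged illustration of the check above: digit_version() converts a version
# string into a comparable tuple, so release candidates sort before releases.
#
#     digit_version('2.0.0rc4') < digit_version('2.0.0') < digit_version('2.1.0')  # True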
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.4.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
from typing import List, Optional
from torchaudio._internal.module_utils import deprecated
from . import utils
from .common import AudioMetaData
__all__ = [
"AudioMetaData",
"load",
"info",
"save",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
info = utils.get_info_func()
load = utils.get_load_func()
save = utils.get_save_func()
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
list of str: The list of available backends.
        The possible values are:
- Dispatcher mode: ``"ffmpeg"``, ``"sox"`` and ``"soundfile"``.
- Legacy backend mode: ``"sox_io"``, ``"soundfile"``.
"""
return list(utils.get_available_backends().keys())
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def get_audio_backend() -> Optional[str]:
"""Get the name of the current global backend
Returns:
str or None:
If dispatcher mode is enabled, returns ``None`` otherwise,
the name of current backend or ``None`` (no backend is set).
"""
return None
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def set_audio_backend(backend: Optional[str]): # noqa
"""Set the global backend.
This is a no-op when dispatcher mode is enabled.
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
pass
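# Hedged usage sketch (dispatcher mode; "audio.wav" is a placeholder path, and
# the per-call ``backend`` argument is assumed from the dispatcher API):
#
#     import torchaudio
#     backends = torchaudio.list_audio_backends()  # e.g. ['ffmpeg', 'soundfile']
#     waveform, sample_rate = torchaudio.load("audio.wav", backend=backends[0])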
|
from typing import List, Optional
import torchaudio
from torchaudio._internal.module_utils import deprecated
# TODO: Once legacy global backend is removed, move this to torchaudio.__init__
def _init_backend():
from . import utils
torchaudio.info = utils.get_info_func()
torchaudio.load = utils.get_load_func()
torchaudio.save = utils.get_save_func()
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
list of str: The list of available backends.
        The possible values are:
- Dispatcher mode: ``"ffmpeg"``, ``"sox"`` and ``"soundfile"``.
- Legacy backend mode: ``"sox_io"``, ``"soundfile"``.
"""
from . import utils
return list(utils.get_available_backends().keys())
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def get_audio_backend() -> Optional[str]:
"""Get the name of the current global backend
Returns:
str or None:
If dispatcher mode is enabled, returns ``None`` otherwise,
the name of current backend or ``None`` (no backend is set).
"""
return None
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def set_audio_backend(backend: Optional[str]): # noqa
"""Set the global backend.
This is a no-op when dispatcher mode is enabled.
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .mask_pseudo_sampler import MaskPseudoSampler
from .mask_sampling_result import MaskSamplingResult
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',
'MaskSamplingResult'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
]
|
import os
from abc import abstractmethod
from unittest import mock
import pytest
from langchain_core.embeddings import Embeddings
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class EmbeddingsTests(BaseStandardTests):
""":private:"""
@property
@abstractmethod
def embeddings_class(self) -> type[Embeddings]: ...
@property
def embedding_model_params(self) -> dict:
return {}
@pytest.fixture
def model(self) -> Embeddings:
return self.embeddings_class(**self.embedding_model_params)
class EmbeddingsUnitTests(EmbeddingsTests):
"""Base class for embeddings unit tests.
Test subclasses must implement the ``embeddings_class`` property to specify the
embeddings model to be tested. You can also override the
``embedding_model_params`` property to specify initialization parameters.
Example:
.. code-block:: python
from typing import Type
from langchain_tests.unit_tests import EmbeddingsUnitTests
from my_package.embeddings import MyEmbeddingsModel
class TestMyEmbeddingsModelUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[MyEmbeddingsModel]:
# Return the embeddings model class to test here
return MyEmbeddingsModel
@property
def embedding_model_params(self) -> dict:
# Return initialization parameters for the model.
return {"model": "model-001"}
.. note::
API references for individual test methods include troubleshooting tips.
Testing initialization from environment variables
Overriding the ``init_from_env_params`` property will enable additional tests
for initialization from environment variables. See below for details.
.. dropdown:: init_from_env_params
This property is used in unit tests to test initialization from
environment variables. It should return a tuple of three dictionaries
that specify the environment variables, additional initialization args,
and expected instance attributes to check.
Defaults to empty dicts. If not overridden, the test is skipped.
Example:
.. code-block:: python
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"MY_API_KEY": "api_key",
},
{
"model": "model-001",
},
{
"my_api_key": "api_key",
},
)
"""
def test_init(self) -> None:
"""Test model initialization.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``embedding_model_params`` is specified
and the model can be initialized from those params.
"""
model = self.embeddings_class(**self.embedding_model_params)
assert model is not None
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""This property is used in unit tests to test initialization from environment
variables. It should return a tuple of three dictionaries that specify the
environment variables, additional initialization args, and expected instance
attributes to check.
"""
return {}, {}, {}
def test_init_from_env(self) -> None:
"""Test initialization from environment variables. Relies on the
``init_from_env_params`` property. Test is skipped if that property is not
set.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``init_from_env_params`` is specified
correctly and that model parameters are properly set from environment
variables during initialization.
"""
env_params, embeddings_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
model = self.embeddings_class(**embeddings_params)
assert model is not None
for k, expected in expected_attrs.items():
actual = getattr(model, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
|
import os
from abc import abstractmethod
from unittest import mock
import pytest
from langchain_core.embeddings import Embeddings
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class EmbeddingsTests(BaseStandardTests):
"""
:private:
"""
@property
@abstractmethod
def embeddings_class(self) -> type[Embeddings]: ...
@property
def embedding_model_params(self) -> dict:
return {}
@pytest.fixture
def model(self) -> Embeddings:
return self.embeddings_class(**self.embedding_model_params)
class EmbeddingsUnitTests(EmbeddingsTests):
"""Base class for embeddings unit tests.
Test subclasses must implement the ``embeddings_class`` property to specify the
embeddings model to be tested. You can also override the
``embedding_model_params`` property to specify initialization parameters.
Example:
.. code-block:: python
from typing import Type
from langchain_tests.unit_tests import EmbeddingsUnitTests
from my_package.embeddings import MyEmbeddingsModel
class TestMyEmbeddingsModelUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[MyEmbeddingsModel]:
# Return the embeddings model class to test here
return MyEmbeddingsModel
@property
def embedding_model_params(self) -> dict:
# Return initialization parameters for the model.
return {"model": "model-001"}
.. note::
API references for individual test methods include troubleshooting tips.
Testing initialization from environment variables
Overriding the ``init_from_env_params`` property will enable additional tests
for initialization from environment variables. See below for details.
.. dropdown:: init_from_env_params
This property is used in unit tests to test initialization from
environment variables. It should return a tuple of three dictionaries
that specify the environment variables, additional initialization args,
and expected instance attributes to check.
Defaults to empty dicts. If not overridden, the test is skipped.
Example:
.. code-block:: python
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"MY_API_KEY": "api_key",
},
{
"model": "model-001",
},
{
"my_api_key": "api_key",
},
)
""" # noqa: E501
def test_init(self) -> None:
"""Test model initialization.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``embedding_model_params`` is specified
and the model can be initialized from those params.
"""
model = self.embeddings_class(**self.embedding_model_params)
assert model is not None
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""This property is used in unit tests to test initialization from environment
variables. It should return a tuple of three dictionaries that specify the
environment variables, additional initialization args, and expected instance
attributes to check."""
return {}, {}, {}
def test_init_from_env(self) -> None:
"""Test initialization from environment variables. Relies on the
``init_from_env_params`` property. Test is skipped if that property is not
set.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``init_from_env_params`` is specified
correctly and that model parameters are properly set from environment
variables during initialization.
"""
env_params, embeddings_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
model = self.embeddings_class(**embeddings_params)
assert model is not None
for k, expected in expected_attrs.items():
actual = getattr(model, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from torch.utils.data import DataLoader
from mmengine.device import is_cuda_available, is_musa_available
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_utils import TORCH_VERSION
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
            element is an interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
        Tuple[List[int], List[int]]: the list of milestones and the list of
            their corresponding intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
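# Worked example (follows directly from the implementation above):
#
#     calc_dynamic_intervals(10, [(100, 5), (200, 2)])
#     # -> ([0, 100, 200], [10, 5, 2])
#     # i.e. evaluate every 10 iterations before iteration 100, every 5 from
#     # 100 to 200, and every 2 afterwards.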
def set_random_seed(seed: Optional[int] = None,
deterministic: bool = False,
diff_rank_seed: bool = False) -> int:
"""Set random seed.
Args:
seed (int, optional): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Defaults to False.
        diff_rank_seed (bool): Whether to add the rank number to the random
            seed so that different ranks get different seeds. Defaults to
            False.
"""
if seed is None:
seed = sync_random_seed()
if diff_rank_seed:
rank = get_rank()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
if is_cuda_available():
torch.cuda.manual_seed_all(seed)
elif is_musa_available():
torch.musa.manual_seed_all(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
if torch.backends.cudnn.benchmark:
print_log(
'torch.backends.cudnn.benchmark is going to be set as '
'`False` to cause cuDNN to deterministically select an '
'algorithm',
logger='current',
level=logging.WARNING)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
torch.use_deterministic_algorithms(True)
return seed
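# Hedged usage sketch: seed everything and opt into deterministic cuDNN kernels.
#
#     seed = set_random_seed(42, deterministic=True)
#     # With diff_rank_seed=True under distributed training, rank k is seeded
#     # with seed + k, so e.g. data augmentation differs across ranks.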
def _get_batch_size(dataloader: dict):
if isinstance(dataloader, dict):
if 'batch_size' in dataloader:
return dataloader['batch_size']
elif ('batch_sampler' in dataloader
and 'batch_size' in dataloader['batch_sampler']):
return dataloader['batch_sampler']['batch_size']
else:
raise ValueError('Please set batch_size in `Dataloader` or '
'`batch_sampler`')
elif isinstance(dataloader, DataLoader):
return dataloader.batch_sampler.batch_size
else:
raise ValueError('dataloader should be a dict or a Dataloader '
f'instance, but got {type(dataloader)}')
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from torch.utils.data import DataLoader
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_utils import TORCH_VERSION
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
            element is an interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
        Tuple[List[int], List[int]]: the list of milestones and the list of
            their corresponding intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
def set_random_seed(seed: Optional[int] = None,
deterministic: bool = False,
diff_rank_seed: bool = False) -> int:
"""Set random seed.
Args:
seed (int, optional): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Defaults to False.
        diff_rank_seed (bool): Whether to add the rank number to the random
            seed so that different ranks get different seeds. Defaults to
            False.
"""
if seed is None:
seed = sync_random_seed()
if diff_rank_seed:
rank = get_rank()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
if torch.backends.cudnn.benchmark:
print_log(
'torch.backends.cudnn.benchmark is going to be set as '
'`False` to cause cuDNN to deterministically select an '
'algorithm',
logger='current',
level=logging.WARNING)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
torch.use_deterministic_algorithms(True)
return seed
def _get_batch_size(dataloader: dict):
if isinstance(dataloader, dict):
if 'batch_size' in dataloader:
return dataloader['batch_size']
elif ('batch_sampler' in dataloader
and 'batch_size' in dataloader['batch_sampler']):
return dataloader['batch_sampler']['batch_size']
else:
raise ValueError('Please set batch_size in `Dataloader` or '
'`batch_sampler`')
elif isinstance(dataloader, DataLoader):
return dataloader.batch_sampler.batch_size
else:
raise ValueError('dataloader should be a dict or a Dataloader '
f'instance, but got {type(dataloader)}')
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
load_numpy,
require_accelerator,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
@require_accelerator
def test_save_load_float16(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload()
torch.cuda.reset_max_memory_allocated()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
def test_save_load_float16(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload()
torch.cuda.reset_max_memory_allocated()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
from unittest.mock import mock_open, patch
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from llama_index.llms.cortex.utils import (
generate_sf_jwt,
is_spcs_environment,
get_spcs_base_url,
get_default_spcs_token,
SPCS_TOKEN_PATH,
)
import os
def test_spcs_utils():
os.environ["SNOWFLAKE_HOST"] = "abc-xyz.snowflakecomputing.com"
# Mock the path check to ensure we're not in SPCS environment
with patch("os.path.exists", return_value=False):
# Test that ValueError is raised when not in SPCS environment
try:
get_spcs_base_url()
            raise AssertionError("ValueError not raised when not in SPCS environment")
except ValueError:
pass
# Test is_spcs_environment
with patch("os.path.exists", return_value=True):
assert is_spcs_environment()
with patch("os.path.exists", return_value=False):
assert not is_spcs_environment()
# Test get_default_spcs_token
fake_token = "fake-jwt-token-for-testing"
with patch("builtins.open", mock_open(read_data=fake_token)) as mock_file:
token = get_default_spcs_token()
assert token == fake_token
mock_file.assert_called_once_with(SPCS_TOKEN_PATH)
del os.environ["SNOWFLAKE_HOST"]
def test_generate_sf_jwt():
sf_account = "MY_SNOWFLAKE_ORG-MY_SNOWFLAKE_ACCOUNT"
sf_user = "MY_SNOWFLAKE_USER"
private_key_obj = rsa.generate_private_key(public_exponent=65537, key_size=2048)
# Serialize the private key to PEM format
private_key = private_key_obj.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
with patch("builtins.open", mock_open(read_data=private_key)) as mock_file:
mock_file.return_value.read.return_value = (
private_key # Ensure binary data is returned
)
token = generate_sf_jwt(sf_account, sf_user, "dummy_key_file.pem")
assert isinstance(token, str)
|
from unittest.mock import mock_open, patch
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from llama_index.llms.cortex.utils import (
generate_sf_jwt,
is_spcs_environment,
get_spcs_base_url,
get_default_spcs_token,
SPCS_TOKEN_PATH,
)
import os
def test_spcs_utils():
# Setup environment variables
os.environ["SNOWFLAKE_HOST"] = "snowflake.example-spcs-host.com"
os.environ["SNOWFLAKE_ACCOUNT"] = "abcdef_ghijkl"
# Test get_spcs_base_url
expected_url = "https://abcdef-ghijkl.example-spcs-host.com"
# Mock the path check to ensure we're not in SPCS environment
with patch("os.path.exists", return_value=False):
# Test that ValueError is raised when not in SPCS environment
try:
get_spcs_base_url()
            raise AssertionError("ValueError not raised when not in SPCS environment")
except ValueError:
pass
# Mock the path check to pretend we're in SPCS environment
with patch("os.path.exists", return_value=True):
# Test that base URL is correctly formed
base_url = get_spcs_base_url()
assert base_url == expected_url
# Test is_spcs_environment
with patch("os.path.exists", return_value=True):
assert is_spcs_environment()
with patch("os.path.exists", return_value=False):
assert not is_spcs_environment()
# Test get_default_spcs_token
fake_token = "fake-jwt-token-for-testing"
with patch("builtins.open", mock_open(read_data=fake_token)) as mock_file:
token = get_default_spcs_token()
assert token == fake_token
mock_file.assert_called_once_with(SPCS_TOKEN_PATH)
# Clean up environment variables
del os.environ["SNOWFLAKE_HOST"]
del os.environ["SNOWFLAKE_ACCOUNT"]
def test_generate_sf_jwt():
sf_account = "MY_SNOWFLAKE_ORG-MY_SNOWFLAKE_ACCOUNT"
sf_user = "MY_SNOWFLAKE_USER"
private_key_obj = rsa.generate_private_key(public_exponent=65537, key_size=2048)
# Serialize the private key to PEM format
private_key = private_key_obj.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
with patch("builtins.open", mock_open(read_data=private_key)) as mock_file:
mock_file.return_value.read.return_value = (
private_key # Ensure binary data is returned
)
token = generate_sf_jwt(sf_account, sf_user, "dummy_key_file.pem")
assert isinstance(token, str)
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
from docarray.array.milvus import DocumentArrayMilvus, MilvusConfig
N = 100
def da_and_dam():
da = DocumentArray.empty(N)
dasq = DocumentArraySqlite.empty(N)
return (da, dasq)
@pytest.fixture
def docs():
yield (Document(text=str(j)) for j in range(100))
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=1)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
def test_iter_len_bool(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
j = 0
for _ in da:
j += 1
assert j == N
assert j == len(da)
assert da
da.clear()
assert not da
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
def test_repr(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
assert f'length={N}' in repr(da)
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('sqlite', None),
('annlite', AnnliteConfig(n_dim=128)),
('weaviate', WeaviateConfig(n_dim=128)),
('qdrant', QdrantConfig(n_dim=128)),
('elasticsearch', ElasticConfig(n_dim=128)),
('redis', RedisConfig(n_dim=128)),
('milvus', MilvusConfig(n_dim=128)),
],
)
def test_repr_str(docs, storage, config, start_storage):
if config:
da = DocumentArray(docs, storage=storage, config=config)
else:
da = DocumentArray(docs, storage=storage)
da.summary()
assert da
da.clear()
assert not da
print(da)
@pytest.mark.parametrize(
'da_cls, config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=10)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=10)),
(DocumentArrayQdrant, QdrantConfig(n_dim=10)),
(DocumentArrayElastic, ElasticConfig(n_dim=10)),
(DocumentArrayRedis, RedisConfig(n_dim=10)),
(DocumentArrayMilvus, MilvusConfig(n_dim=10)),
],
)
def test_iadd(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
oid = id(da)
dap = DocumentArray.empty(10)
da += dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid == oid
@pytest.mark.parametrize('da', [da_and_dam()[0]])
def test_add(da):
oid = id(da)
dap = DocumentArray.empty(10)
da = da + dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid != oid
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
N = 100
def da_and_dam():
da = DocumentArray.empty(N)
dasq = DocumentArraySqlite.empty(N)
return (da, dasq)
@pytest.fixture
def docs():
yield (Document(text=str(j)) for j in range(100))
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=1)),
],
)
def test_iter_len_bool(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
j = 0
for _ in da:
j += 1
assert j == N
assert j == len(da)
assert da
da.clear()
assert not da
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_repr(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
assert f'length={N}' in repr(da)
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('sqlite', None),
('annlite', AnnliteConfig(n_dim=128)),
('weaviate', WeaviateConfig(n_dim=128)),
('qdrant', QdrantConfig(n_dim=128)),
('elasticsearch', ElasticConfig(n_dim=128)),
('redis', RedisConfig(n_dim=128)),
],
)
def test_repr_str(docs, storage, config, start_storage):
if config:
da = DocumentArray(docs, storage=storage, config=config)
else:
da = DocumentArray(docs, storage=storage)
da.summary()
assert da
da.clear()
assert not da
print(da)
@pytest.mark.parametrize(
'da_cls, config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=10)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=10)),
(DocumentArrayQdrant, QdrantConfig(n_dim=10)),
(DocumentArrayElastic, ElasticConfig(n_dim=10)),
(DocumentArrayRedis, RedisConfig(n_dim=10)),
],
)
def test_iadd(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
oid = id(da)
dap = DocumentArray.empty(10)
da += dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid == oid
@pytest.mark.parametrize('da', [da_and_dam()[0]])
def test_add(da):
oid = id(da)
dap = DocumentArray.empty(10)
da = da + dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid != oid
|
from typing import Any, List, Optional, Union
from pathlib import Path
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.utils import infer_torch_device
DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH = 512
dispatcher = get_dispatcher(__name__)
class SentenceTransformerRerank(BaseNodePostprocessor):
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
_model: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
keep_retrieval_score: Optional[bool] = False,
cache_dir: Optional[Union[str, Path]] = None,
):
try:
from sentence_transformers import CrossEncoder
except ImportError:
            raise ImportError(
                "Cannot import sentence-transformers or torch package, "
                "please run `pip install torch sentence-transformers`."
            )
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
)
device = infer_torch_device() if device is None else device
self._model = CrossEncoder(
model,
max_length=DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH,
device=device,
cache_dir=cache_dir,
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = float(score)
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
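# A minimal usage sketch (illustrative only; it assumes the default
# cross-encoder checkpoint can be downloaded and that a retriever has already
# produced scored nodes):
#
#     from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
#
#     reranker = SentenceTransformerRerank(top_n=2, keep_retrieval_score=True)
#     nodes = [
#         NodeWithScore(node=TextNode(text="Paris is the capital of France."), score=0.3),
#         NodeWithScore(node=TextNode(text="Berlin is the capital of Germany."), score=0.5),
#     ]
#     reranked = reranker.postprocess_nodes(nodes, QueryBundle(query_str="capital of France"))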
|
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.utils import infer_torch_device
DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH = 512
dispatcher = get_dispatcher(__name__)
class SentenceTransformerRerank(BaseNodePostprocessor):
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
_model: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
keep_retrieval_score: Optional[bool] = False,
):
try:
from sentence_transformers import CrossEncoder
except ImportError:
            raise ImportError(
                "Cannot import sentence-transformers or torch package, "
                "please run `pip install torch sentence-transformers`."
            )
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
)
device = infer_torch_device() if device is None else device
self._model = CrossEncoder(
model, max_length=DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH, device=device
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = float(score)
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
|
from typing import Dict
from jina import Client, Document, DocumentArray, Executor, Flow, requests
ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
'param1': 'changed',
'param2': 60,
'exec_name': {'param1': 'changed'},
}
class DummyOverrideParams(Executor):
@requests()
def bar(self, docs: DocumentArray, parameters: Dict, *args, **kwargs):
for doc in docs:
doc.tags = parameters
class DummyAssertNotOverrideBetweenPodsParams(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
assert parameters == ORIGINAL_PARAMS
parameters['param2'] = 'change_in_pod'
class DummyAssertIfParamsCanBeChangedInsidePods(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
        # it is unclear whether this behavior is intended, but this assertion documents it
assert parameters == ORIGINAL_PARAMS
def test_override_params(mocker, port_generator):
exposed_port = port_generator()
f = (
Flow(port=exposed_port)
.add(
uses={'jtype': 'DummyOverrideParams', 'metas': {'name': 'exec_name'}},
)
.add(uses=DummyAssertNotOverrideBetweenPodsParams)
.add(uses=DummyAssertIfParamsCanBeChangedInsidePods)
)
error_mock = mocker.Mock()
with f:
resp = Client(port=exposed_port).index(
inputs=DocumentArray([Document()]),
parameters={'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}},
on_error=error_mock,
return_responses=True,
)
error_mock.assert_not_called()
assert len(resp) == 1
assert len(resp[0].docs) == 1
for doc in resp[0].docs:
assert doc.tags == OVERRIDEN_EXECUTOR1_PARAMS
assert doc.tags['param1'] == 'changed'
assert doc.tags['param2'] == 60
assert doc.tags['exec_name']['param1'] == 'changed'
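# Note on the behavior documented above: request `parameters` are broadcast to
# every Executor in the Flow; a nested dict keyed by an Executor's name (here
# 'exec_name') overrides the top-level values for that Executor only, which is
# why DummyOverrideParams sees param1 == 'changed' while the downstream
# Executors still receive the original parameters.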
|
from typing import Dict
from jina import Client, Document, DocumentArray, Executor, Flow, requests
ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
'param1': 'changed',
'param2': 60,
'exec_name': {'param1': 'changed'},
}
class DummyOverrideParams(Executor):
@requests()
def bar(self, docs: 'DocumentArray', parameters: Dict, *args, **kwargs):
for doc in docs:
doc.tags = parameters
class DummyAssertNotOverrideBetweenPodsParams(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
assert parameters == ORIGINAL_PARAMS
parameters['param2'] = 'change_in_pod'
class DummyAssertIfParamsCanBeChangedInsidePods(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
        # it is unclear whether this behavior is intended, but this assertion documents it
assert parameters == ORIGINAL_PARAMS
def test_override_params(mocker, port_generator):
exposed_port = port_generator()
f = (
Flow(port=exposed_port)
.add(
uses={'jtype': 'DummyOverrideParams', 'metas': {'name': 'exec_name'}},
)
.add(uses=DummyAssertNotOverrideBetweenPodsParams)
.add(uses=DummyAssertIfParamsCanBeChangedInsidePods)
)
error_mock = mocker.Mock()
with f:
resp = Client(port=exposed_port).index(
inputs=DocumentArray([Document()]),
parameters={'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}},
on_error=error_mock,
return_responses=True,
)
error_mock.assert_not_called()
assert len(resp) == 1
assert len(resp[0].docs) == 1
for doc in resp[0].docs:
assert doc.tags == OVERRIDEN_EXECUTOR1_PARAMS
assert doc.tags['param1'] == 'changed'
assert doc.tags['param2'] == 60
assert doc.tags['exec_name']['param1'] == 'changed'
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B0 as EfficientNetV2B0,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B1 as EfficientNetV2B1,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B2 as EfficientNetV2B2,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B3 as EfficientNetV2B3,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2L as EfficientNetV2L,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2M as EfficientNetV2M,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2S as EfficientNetV2S,
)
from keras.src.applications.efficientnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.efficientnet_v2 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.efficientnet_v2 import EfficientNetV2B0
from keras.src.applications.efficientnet_v2 import EfficientNetV2B1
from keras.src.applications.efficientnet_v2 import EfficientNetV2B2
from keras.src.applications.efficientnet_v2 import EfficientNetV2B3
from keras.src.applications.efficientnet_v2 import EfficientNetV2L
from keras.src.applications.efficientnet_v2 import EfficientNetV2M
from keras.src.applications.efficientnet_v2 import EfficientNetV2S
from keras.src.applications.efficientnet_v2 import decode_predictions
from keras.src.applications.efficientnet_v2 import preprocess_input
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
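# Rough meaning of the InstaBoost settings above (an informal summary based on
# the InstaBoost paper, not authoritative): action_prob=(1, 0, 0) always picks
# the 'normal' paste action; scale, dx, dy and theta bound the random rescaling,
# translation and rotation of pasted instances; aug_ratio=0.5 augments roughly
# half of the training images.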
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
"""
Class for searching and importing data from OpenAlex.
"""
import logging
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class OpenAlexReader(BaseReader):
"""
This class is used to search and import data from OpenAlex.
Parameters
----------
email : str
Email address to use for OpenAlex API
Attributes
----------
Works : pyalex.Works
pyalex.Works object
pyalex : pyalex
pyalex object
"""
def __init__(self, email) -> None:
self.email = email
def _search_openalex(self, query, fields):
base_url = "https://api.openalex.org/works?search="
fields_param = f"&select={fields}"
email_param = f"&mailto={self.email}"
full_url = base_url + query + fields_param + email_param
try:
response = requests.get(full_url, timeout=10)
response.raise_for_status() # Check if request is successful
data = response.json() # Parse JSON data
if "error" in data:
raise ValueError(f"API returned error: {data['error']}")
return data
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error occurred: {http_error}")
except requests.exceptions.RequestException as request_error:
logger.error(f"Error occurred: {request_error}")
except ValueError as value_error:
logger.error(value_error)
return None
def _fulltext_search_openalex(self, query, fields):
base_url = "https://api.openalex.org/works?filter=fulltext.search:"
fields_param = f"&select={fields}"
email_param = f"&mailto={self.email}"
full_url = base_url + query + fields_param + email_param
try:
response = requests.get(full_url, timeout=10)
response.raise_for_status() # Check if request is successful
data = response.json() # Parse JSON data
if "error" in data:
raise ValueError(f"API returned error: {data['error']}")
return data
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error occurred: {http_error}")
except requests.exceptions.RequestException as request_error:
logger.error(f"Error occurred: {request_error}")
except ValueError as value_error:
logger.error(value_error)
return None
def _invert_abstract(self, inv_index):
if inv_index is not None:
l_inv = [(w, p) for w, pos in inv_index.items() for p in pos]
return " ".join(x[0] for x in sorted(l_inv, key=lambda x: x[1]))
return None
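    # Example of the inversion above (illustrative): an OpenAlex inverted index
    # such as {"Hello": [0], "world": [1]} is flattened into (word, position)
    # pairs, sorted by position, and joined back into the string "Hello world".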
def load_data(self, query: str, full_text=False, fields=None) -> List[Document]:
if fields is None:
fields = "title,abstract_inverted_index,publication_year,keywords,authorships,primary_location"
if full_text:
works = self._fulltext_search_openalex(query, fields)
else:
works = self._search_openalex(query, fields)
documents = []
for work in works["results"]:
if work["abstract_inverted_index"] is not None:
abstract = self._invert_abstract(work["abstract_inverted_index"])
else:
abstract = None
title = work.get("title", None)
text = None
# concat title and abstract
if abstract and title:
text = title + " " + abstract
elif not abstract:
text = title
try:
primary_location = work["primary_location"]["source"]["display_name"]
except (KeyError, TypeError):
primary_location = None
metadata = {
"title": work.get("title", None),
"keywords": work.get("keywords", None),
"primary_location": primary_location,
"publication_year": work.get("publication_year", None),
"authorships": [
item["author"]["display_name"] for item in work["authorships"]
],
}
documents.append(Document(text=text, extra_info=metadata))
return documents
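# A minimal usage sketch (illustrative only; the email and query are made up,
# and calling load_data performs live OpenAlex API requests):
#
#     reader = OpenAlexReader(email="you@example.com")
#     documents = reader.load_data(query="language models", full_text=False)
#     for doc in documents:
#         print(doc.metadata["title"], doc.metadata["publication_year"])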
|
"""
Class for searching and importing data from OpenAlex.
"""
import logging
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class OpenAlexReader(BaseReader):
"""
This class is used to search and import data from OpenAlex.
Parameters
----------
email : str
Email address to use for OpenAlex API
    Attributes
----------
Works : pyalex.Works
pyalex.Works object
pyalex : pyalex
pyalex object
"""
def __init__(self, email) -> None:
self.email = email
def _search_openalex(self, query, fields):
base_url = "https://api.openalex.org/works?search="
fields_param = f"&select={fields}"
email_param = f"&mailto={self.email}"
full_url = base_url + query + fields_param + email_param
try:
response = requests.get(full_url, timeout=10)
response.raise_for_status() # Check if request is successful
data = response.json() # Parse JSON data
if "error" in data:
raise ValueError(f"API returned error: {data['error']}")
return data
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error occurred: {http_error}")
except requests.exceptions.RequestException as request_error:
logger.error(f"Error occurred: {request_error}")
except ValueError as value_error:
logger.error(value_error)
return None
def _fulltext_search_openalex(self, query, fields):
base_url = "https://api.openalex.org/works?filter=fulltext.search:"
fields_param = f"&select={fields}"
email_param = f"&mailto={self.email}"
full_url = base_url + query + fields_param + email_param
try:
response = requests.get(full_url, timeout=10)
response.raise_for_status() # Check if request is successful
data = response.json() # Parse JSON data
if "error" in data:
raise ValueError(f"API returned error: {data['error']}")
return data
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error occurred: {http_error}")
except requests.exceptions.RequestException as request_error:
logger.error(f"Error occurred: {request_error}")
except ValueError as value_error:
logger.error(value_error)
return None
def _invert_abstract(self, inv_index):
if inv_index is not None:
l_inv = [(w, p) for w, pos in inv_index.items() for p in pos]
return " ".join(x[0] for x in sorted(l_inv, key=lambda x: x[1]))
return None
def load_data(self, query: str, full_text=False, fields=None) -> List[Document]:
if fields is None:
fields = "title,abstract_inverted_index,publication_year,keywords,authorships,primary_location"
if full_text:
works = self._fulltext_search_openalex(query, fields)
else:
works = self._search_openalex(query, fields)
documents = []
for work in works["results"]:
if work["abstract_inverted_index"] is not None:
abstract = self._invert_abstract(work["abstract_inverted_index"])
else:
abstract = None
title = work.get("title", None)
text = None
# concat title and abstract
if abstract and title:
text = title + " " + abstract
elif not abstract:
text = title
try:
primary_location = work["primary_location"]["source"]["display_name"]
except (KeyError, TypeError):
primary_location = None
metadata = {
"title": work.get("title", None),
"keywords": work.get("keywords", None),
"primary_location": primary_location,
"publication_year": work.get("publication_year", None),
"authorships": [
item["author"]["display_name"] for item in work["authorships"]
],
}
documents.append(Document(text=text, extra_info=metadata))
return documents
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign boxes to either a ground truth boxes or a negative boxes."""
|
from abc import ABCMeta, abstractmethod
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign boxes to either a ground truth boxes or a negative boxes."""
|
from unittest.mock import MagicMock, AsyncMock
import pytest
import sys
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
READER_TEST_PARAM = pytest.param(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2",
],
{
"parse": True,
},
{
"results": [{"content": {"key1": "value1", "key2": "value2"}}],
"job": {"job_id": 42424242},
},
"# key1\n value1\n\n# key2\n value2\n",
id="response_success",
)
skip_if_py39_or_lower = sys.version_info < (3, 10)
@pytest.mark.skipif(skip_if_py39_or_lower, reason="requires Python 3.10 or newer")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
def test_sync_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = MagicMock()
get_response_mock.return_value = return_value
reader.api.get_response = get_response_mock
docs = reader.load_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
@pytest.mark.skipif(skip_if_py39_or_lower, reason="requires Python 3.10 or newer")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
@pytest.mark.asyncio
async def test_async_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = AsyncMock()
get_response_mock.return_value = return_value
reader.async_api.get_response = get_response_mock
docs = await reader.aload_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
|
from unittest.mock import MagicMock, AsyncMock
import pytest
import sys
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
READER_TEST_PARAM = pytest.param(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2",
],
{
"parse": True,
},
{
"results": [{"content": {"key1": "value1", "key2": "value2"}}],
"job": {"job_id": 42424242},
},
"# key1\n value1\n\n# key2\n value2\n",
id="response_success",
)
skip_if_py39_or_lower = sys.version_info < (3, 10)
@pytest.mark.skipif(skip_if_py39_or_lower, reason="requires Python 3.10 or newer")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
def test_sync_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = MagicMock()
get_response_mock.return_value = return_value
reader.api.get_response = get_response_mock
docs = reader.load_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
@pytest.mark.skipif(skip_if_py39_or_lower, reason="requires Python 3.10 or newer")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
@pytest.mark.asyncio
async def test_async_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = AsyncMock()
get_response_mock.return_value = return_value
reader.async_api.get_response = get_response_mock
docs = await reader.aload_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
|
from pathlib import Path
from typing import Any, BinaryIO, Optional, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> list[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: tuple[str, tuple[str, BinaryIO]]) -> dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
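    # Example of the key above (illustrative): an archive member such as
    # ".../food-101/images/apple_pie/1005649.jpg" maps to "apple_pie/1005649",
    # matching the ids listed in the train/test split files.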
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> list[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
|
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
|
"""Spotify reader."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SpotifyReader(BaseReader):
"""
Spotify Reader.
Read a user's saved albums, tracks, or playlists from Spotify.
"""
def load_data(self, collection: Optional[str] = "albums") -> List[Document]:
"""
Load data from a user's Spotify account.
Args:
collections (Optional[str]): "albums", "tracks", or "playlists"
"""
import spotipy
from spotipy.oauth2 import SpotifyOAuth
scope = "user-library-read"
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
results = []
if collection == "albums":
response = sp.current_user_saved_albums()
items = response["items"]
for item in items:
album = item["album"]
album_name = album["name"]
artist_name = album["artists"][0]["name"]
album_string = f"Album {album_name} by Artist {artist_name}\n"
results.append(Document(text=album_string))
elif collection == "tracks":
response = sp.current_user_saved_tracks()
items = response["items"]
for item in items:
track = item["track"]
track_name = track["name"]
artist_name = track["artists"][0]["name"]
artist_string = f"Track {track_name} by Artist {artist_name}\n"
results.append(Document(text=artist_string))
elif collection == "playlists":
response = sp.current_user_playlists()
items = response["items"]
for item in items:
playlist_name = item["name"]
owner_name = item["owner"]["display_name"]
playlist_string = f"Playlist {playlist_name} created by {owner_name}\n"
results.append(Document(text=playlist_string))
else:
raise ValueError(
"Invalid collection parameter value. Allowed values are 'albums',"
" 'tracks', or 'playlists'."
)
return results
if __name__ == "__main__":
reader = SpotifyReader()
print(reader.load_data())
|
"""Spotify reader."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SpotifyReader(BaseReader):
"""Spotify Reader.
Read a user's saved albums, tracks, or playlists from Spotify.
"""
def load_data(self, collection: Optional[str] = "albums") -> List[Document]:
"""Load data from a user's Spotify account.
Args:
collections (Optional[str]): "albums", "tracks", or "playlists"
"""
import spotipy
from spotipy.oauth2 import SpotifyOAuth
scope = "user-library-read"
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
results = []
if collection == "albums":
response = sp.current_user_saved_albums()
items = response["items"]
for item in items:
album = item["album"]
album_name = album["name"]
artist_name = album["artists"][0]["name"]
album_string = f"Album {album_name} by Artist {artist_name}\n"
results.append(Document(text=album_string))
elif collection == "tracks":
response = sp.current_user_saved_tracks()
items = response["items"]
for item in items:
track = item["track"]
track_name = track["name"]
artist_name = track["artists"][0]["name"]
artist_string = f"Track {track_name} by Artist {artist_name}\n"
results.append(Document(text=artist_string))
elif collection == "playlists":
response = sp.current_user_playlists()
items = response["items"]
for item in items:
playlist_name = item["name"]
owner_name = item["owner"]["display_name"]
playlist_string = f"Playlist {playlist_name} created by {owner_name}\n"
results.append(Document(text=playlist_string))
else:
raise ValueError(
"Invalid collection parameter value. Allowed values are 'albums',"
" 'tracks', or 'playlists'."
)
return results
if __name__ == "__main__":
reader = SpotifyReader()
print(reader.load_data())
|
import os
import time
import pytest
import subprocess
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def docker_image():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir), tag='clitest')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
def test_executor_cli_docker(docker_image):
process = subprocess.Popen(
['jina', 'executor', '--uses', 'docker://clitest:latest']
)
time.sleep(5)
poll = process.poll()
process.terminate()
assert poll is None
|
import os
import time
import pytest
import subprocess
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def docker_image():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir), tag='clitest')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
def test_executor_cli_docker(docker_image):
process = subprocess.Popen(
['jina', 'executor', '--uses', 'docker://clitest:latest']
)
time.sleep(5)
poll = process.poll()
process.terminate()
assert poll is None
def test_zed_runtime_cli_docker(docker_image):
process = subprocess.Popen(
['jina', 'executor', '--native', '--uses', 'docker://clitest:latest']
)
time.sleep(5)
poll = process.poll()
process.terminate()
assert poll == 1 # failed
|
# Copyright (c) OpenMMLab. All rights reserved.
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'EmptyCacheHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook'
]
|
import pytest
from backend.util.request import pin_url, validate_url
@pytest.mark.parametrize(
"raw_url, trusted_origins, expected_value, should_raise",
[
# Rejected IP ranges
("localhost", [], None, True),
("192.168.1.1", [], None, True),
("127.0.0.1", [], None, True),
("0.0.0.0", [], None, True),
# Normal URLs (should default to http:// if no scheme provided)
("google.com/a?b=c", [], "http://google.com/a?b=c", False),
("github.com?key=!@!@", [], "http://github.com?key=!@!@", False),
# Scheme Enforcement
("ftp://example.com", [], None, True),
("file://example.com", [], None, True),
# International domain converting to punycode (allowed if public)
("http://xn--exmple-cua.com", [], "http://xn--exmple-cua.com", False),
# Invalid domain (IDNA failure)
("http://exa◌mple.com", [], None, True),
# IPv6 addresses (loopback/blocked)
("::1", [], None, True),
("http://[::1]", [], None, True),
# Suspicious Characters in Hostname
("http://example_underscore.com", [], None, True),
("http://exa mple.com", [], None, True),
# Malformed URLs
("http://", [], None, True), # No hostname
("://missing-scheme", [], None, True), # Missing proper scheme
# Trusted Origins
(
"internal-api.company.com",
["internal-api.company.com", "10.0.0.5"],
"http://internal-api.company.com",
False,
),
("10.0.0.5", ["10.0.0.5"], "http://10.0.0.5", False),
# Special Characters in Path
(
"example.com/path%20with%20spaces",
[],
"http://example.com/path%20with%20spaces",
False,
),
# Backslashes should be replaced with forward slashes
("http://example.com\\backslash", [], "http://example.com/backslash", False),
# Check default-scheme behavior for valid domains
("example.com", [], "http://example.com", False),
("https://secure.com", [], "https://secure.com", False),
# Non-ASCII Characters in Query/Fragment
("example.com?param=äöü", [], "http://example.com?param=äöü", False),
],
)
def test_validate_url_no_dns_rebinding(
raw_url: str, trusted_origins: list[str], expected_value: str, should_raise: bool
):
if should_raise:
with pytest.raises(ValueError):
validate_url(raw_url, trusted_origins)
else:
validated_url, _, _ = validate_url(raw_url, trusted_origins)
assert validated_url.geturl() == expected_value
@pytest.mark.parametrize(
"hostname, resolved_ips, expect_error, expected_ip",
[
# Multiple public IPs, none blocked
("public-example.com", ["8.8.8.8", "9.9.9.9"], False, "8.8.8.8"),
# Includes a blocked IP (e.g. link-local 169.254.x.x) => should raise
("rebinding.com", ["1.2.3.4", "169.254.169.254"], True, None),
# Single public IP
("single-public.com", ["8.8.8.8"], False, "8.8.8.8"),
# Single blocked IP
("blocked.com", ["127.0.0.1"], True, None),
],
)
def test_dns_rebinding_fix(
monkeypatch,
hostname: str,
resolved_ips: list[str],
expect_error: bool,
expected_ip: str,
):
"""
Tests that validate_url pins the first valid public IP address, and rejects
the domain if any of the resolved IPs are blocked (i.e., DNS Rebinding scenario).
"""
def mock_getaddrinfo(host, port, *args, **kwargs):
# Simulate multiple IPs returned for the given hostname
return [(None, None, None, None, (ip, port)) for ip in resolved_ips]
# Patch socket.getaddrinfo so we control the DNS resolution in the test
monkeypatch.setattr("socket.getaddrinfo", mock_getaddrinfo)
if expect_error:
# If any IP is blocked, we expect a ValueError
with pytest.raises(ValueError):
url, _, ip_addresses = validate_url(hostname, [])
pin_url(url, ip_addresses)
else:
url, _, ip_addresses = validate_url(hostname, [])
pinned_url = pin_url(url, ip_addresses).geturl()
# The pinned_url should contain the first valid IP
assert pinned_url.startswith("http://") or pinned_url.startswith("https://")
assert expected_ip in pinned_url
# The unpinned URL's hostname should match our original IDNA encoded hostname
assert url.hostname == hostname
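# Note on the contract exercised above: validate_url() returns the parsed URL,
# a second value unused here, and the list of resolved IP addresses; pin_url()
# then rewrites the URL so its host is the first usable resolved IP, closing
# the window in which a second, attacker-controlled DNS lookup could occur
# (the classic DNS-rebinding attack).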
|
import pytest
from backend.util.request import validate_url
@pytest.mark.parametrize(
"url, trusted_origins, expected_value, should_raise",
[
# Rejected IP ranges
("localhost", [], None, True),
("192.168.1.1", [], None, True),
("127.0.0.1", [], None, True),
("0.0.0.0", [], None, True),
# Normal URLs (should default to http:// if no scheme provided)
("google.com/a?b=c", [], "http://google.com/a?b=c", False),
("github.com?key=!@!@", [], "http://github.com?key=!@!@", False),
# Scheme Enforcement
("ftp://example.com", [], None, True),
("file://example.com", [], None, True),
# International domain converting to punycode (allowed if public)
("http://xn--exmple-cua.com", [], "http://xn--exmple-cua.com", False),
# Invalid domain (IDNA failure)
("http://exa◌mple.com", [], None, True),
# IPv6 addresses (loopback/blocked)
("::1", [], None, True),
("http://[::1]", [], None, True),
# Suspicious Characters in Hostname
("http://example_underscore.com", [], None, True),
("http://exa mple.com", [], None, True),
# Malformed URLs
("http://", [], None, True), # No hostname
("://missing-scheme", [], None, True), # Missing proper scheme
# Trusted Origins
(
"internal-api.company.com",
["internal-api.company.com", "10.0.0.5"],
"http://internal-api.company.com",
False,
),
("10.0.0.5", ["10.0.0.5"], "http://10.0.0.5", False),
# Special Characters in Path
(
"example.com/path%20with%20spaces",
[],
"http://example.com/path%20with%20spaces",
False,
),
# Backslashes should be replaced with forward slashes
("http://example.com\\backslash", [], "http://example.com/backslash", False),
# Check default-scheme behavior for valid domains
("example.com", [], "http://example.com", False),
("https://secure.com", [], "https://secure.com", False),
# Non-ASCII Characters in Query/Fragment
("example.com?param=äöü", [], "http://example.com?param=äöü", False),
],
)
def test_validate_url_no_dns_rebinding(
url, trusted_origins, expected_value, should_raise
):
if should_raise:
with pytest.raises(ValueError):
validate_url(url, trusted_origins, enable_dns_rebinding=False)
else:
url, host = validate_url(url, trusted_origins, enable_dns_rebinding=False)
assert url == expected_value
@pytest.mark.parametrize(
"hostname, resolved_ips, expect_error, expected_ip",
[
# Multiple public IPs, none blocked
("public-example.com", ["8.8.8.8", "9.9.9.9"], False, "8.8.8.8"),
# Includes a blocked IP (e.g. link-local 169.254.x.x) => should raise
("rebinding.com", ["1.2.3.4", "169.254.169.254"], True, None),
# Single public IP
("single-public.com", ["8.8.8.8"], False, "8.8.8.8"),
# Single blocked IP
("blocked.com", ["127.0.0.1"], True, None),
],
)
def test_dns_rebinding_fix(
monkeypatch, hostname, resolved_ips, expect_error, expected_ip
):
"""
Tests that validate_url pins the first valid public IP address, and rejects
the domain if any of the resolved IPs are blocked (i.e., DNS Rebinding scenario).
"""
def mock_getaddrinfo(host, port, *args, **kwargs):
# Simulate multiple IPs returned for the given hostname
return [(None, None, None, None, (ip, port)) for ip in resolved_ips]
# Patch socket.getaddrinfo so we control the DNS resolution in the test
monkeypatch.setattr("socket.getaddrinfo", mock_getaddrinfo)
if expect_error:
# If any IP is blocked, we expect a ValueError
with pytest.raises(ValueError):
validate_url(hostname, [])
else:
pinned_url, ascii_hostname = validate_url(hostname, [])
# The pinned_url should contain the first valid IP
assert pinned_url.startswith("http://") or pinned_url.startswith("https://")
assert expected_ip in pinned_url
# The ascii_hostname should match our original hostname after IDNA encoding
assert ascii_hostname == hostname
|
_base_ = './mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc2'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc1'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluator import Evaluator
from .metric import BaseMetric
from .utils import get_metric_value
__all__ = ['BaseMetric', 'Evaluator', 'get_metric_value']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseEvaluator
from .builder import build_evaluator
from .composed_evaluator import ComposedEvaluator
from .utils import get_metric_value
__all__ = [
'BaseEvaluator', 'ComposedEvaluator', 'build_evaluator', 'get_metric_value'
]
|
import pytest
from datasets import Dataset
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler
from sentence_transformers.sampler import RoundRobinBatchSampler
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
    Dummy dataset for testing purposes. The concatenated dataset looks as follows:
    {
        "data": [0, 1, 2, ..., 23, 24, 100, 101, ..., 123, 124, 200, 201, ..., 223, 224],
        "label": [0, 1, 0, 1, ..., 0, 1],
    }
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
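# Illustration of the round-robin order asserted above: with batch_size=4 the
# sampler yields indices [0-3] from dataset_1, then [25-28] (data values
# 100-103) from dataset_2, then [4-7], [29-32], and so on, stopping once the
# shorter dataset_1 is exhausted, so dataset_2 is only partially consumed.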
|
import pytest
from datasets import Dataset
from sentence_transformers.sampler import RoundRobinBatchSampler
from torch.utils.data import BatchSampler, SequentialSampler, ConcatDataset
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
    Dummy dataset for testing purposes. The concatenated dataset looks as follows:
    {
        "data": [0, 1, 2, ..., 23, 24, 100, 101, ..., 123, 124, 200, 201, ..., 223, 224],
        "label": [0, 1, 0, 1, ..., 0, 1],
    }
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
from ..utils import is_torchdynamo_compiling
try:
from kernels import (
Device,
LayerRepository,
register_kernel_mapping,
replace_kernel_forward_from_hub,
)
from kernels import (
use_kernel_forward_from_hub as original_use_kernel_forward_from_hub,
)
_hub_kernels_available = True
_KERNEL_MAPPING: Dict[str, Dict[Union[Device, str], LayerRepository]] = {
"MultiScaleDeformableAttention": {
"cuda": LayerRepository(
repo_id="kernels-community/deformable-detr",
layer_name="MultiScaleDeformableAttention",
)
},
"Llama4TextMoe": {
"cuda": LayerRepository(
# Move to kernels-community/moe once we release.
repo_id="kernels-community/moe",
layer_name="Llama4TextMoe",
)
},
"RMSNorm": {
"cuda": LayerRepository(
repo_id="kernels-community/triton-layer-norm",
layer_name="LlamaRMSNorm",
revision="pure-layer-test",
)
},
"MLP": {
"cuda": LayerRepository(
repo_id="medmekk/triton-llama-mlp",
layer_name="TritonLlamaMLP",
)
},
}
register_kernel_mapping(_KERNEL_MAPPING)
def use_kernel_forward_from_hub(*args, **kwargs):
"""
Expands `kernels`' `use_kernel_forward_from_hub` to NOT use a kernel at compile time. This should be removed
when `kernels` supports `torch.compile`.
If the layer has a `config` attribute, we can also set `config.disable_custom_kernels = True` to disable the
kernel.
"""
def decorator_with_compile_path(cls):
# Keeps a reference to the original forward method
original_forward = cls.forward
# Applies the original decorator
decorator = original_use_kernel_forward_from_hub(*args, **kwargs)
cls = decorator(cls)
# Replaces the kernel forward with a compile-friendly version
kernel_forward = cls.forward
def forward_with_compile_path(*forward_args, **forward_kwargs):
disable_custom_kernels = hasattr(cls, "config") and getattr(cls.config, "disable_custom_kernels", None)
if is_torchdynamo_compiling() or disable_custom_kernels:
return original_forward(*forward_args, **forward_kwargs)
else:
return kernel_forward(*forward_args, **forward_kwargs)
cls.forward = forward_with_compile_path
return cls
return decorator_with_compile_path
except ImportError:
    # Stubs to make the decorators in transformers work when `kernels`
    # is not installed.
def use_kernel_forward_from_hub(*args, **kwargs):
def decorator(cls):
return cls
return decorator
class LayerRepository:
def __init__(self, *args, **kwargs):
raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
def replace_kernel_forward_from_hub(*args, **kwargs):
raise RuntimeError(
"replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
)
def register_kernel_mapping(*args, **kwargs):
raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")
_hub_kernels_available = False
def is_hub_kernels_available():
return _hub_kernels_available
__all__ = [
"LayerRepository",
"is_hub_kernels_available",
"use_kernel_forward_from_hub",
"register_kernel_mapping",
"replace_kernel_forward_from_hub",
]
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
try:
from kernels import (
Device,
LayerRepository,
register_kernel_mapping,
replace_kernel_forward_from_hub,
use_kernel_forward_from_hub,
)
_hub_kernels_available = True
_KERNEL_MAPPING: Dict[str, Dict[Union[Device, str], LayerRepository]] = {
"MultiScaleDeformableAttention": {
"cuda": LayerRepository(
repo_id="kernels-community/deformable-detr",
layer_name="MultiScaleDeformableAttention",
)
},
"Llama4TextMoe": {
"cuda": LayerRepository(
# Move to kernels-community/moe once we release.
repo_id="kernels-community/moe",
layer_name="Llama4TextMoe",
)
},
"RMSNorm": {
"cuda": LayerRepository(
repo_id="kernels-community/triton-layer-norm",
layer_name="LlamaRMSNorm",
revision="pure-layer-test",
)
},
"MLP": {
"cuda": LayerRepository(
repo_id="medmekk/triton-llama-mlp",
layer_name="TritonLlamaMLP",
)
},
}
register_kernel_mapping(_KERNEL_MAPPING)
except ImportError:
    # Stubs so that the decorators in transformers keep working when
    # `kernels` is not installed.
def use_kernel_forward_from_hub(*args, **kwargs):
def decorator(cls):
return cls
return decorator
class LayerRepository:
def __init__(self, *args, **kwargs):
raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
def replace_kernel_forward_from_hub(*args, **kwargs):
raise RuntimeError(
"replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
)
def register_kernel_mapping(*args, **kwargs):
raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")
_hub_kernels_available = False
def is_hub_kernels_available():
return _hub_kernels_available
__all__ = [
"LayerRepository",
"is_hub_kernels_available",
"use_kernel_forward_from_hub",
"register_kernel_mapping",
"replace_kernel_forward_from_hub",
]
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and that can be loaded into an image tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load_pil(
self,
) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
return PILImage.open(BytesIO(self))
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> ImageNdArray:
"""
Load the image from the [`ImageBytes`][docarray.typing.ImageBytes] into an
[`ImageNdArray`][docarray.typing.ImageNdArray].
---
```python
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: [`ImageNdArray`][docarray.typing.ImageNdArray] representing the image as RGB values
"""
raw_img = self.load_pil()
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
img = self._move_channel_axis(tensor, axis_layout=axis_layout)
return parse_obj_as(ImageNdArray, img)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
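if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): build an
    # in-memory PNG with Pillow, wrap the raw bytes in `ImageBytes`, and load
    # a channel-first tensor from it.
    from PIL import Image as _DemoImage

    _buffer = BytesIO()
    _DemoImage.new("RGB", (64, 32), color=(255, 0, 0)).save(_buffer, format="PNG")
    _img_bytes = parse_obj_as(ImageBytes, _buffer.getvalue())
    _tensor = _img_bytes.load(axis_layout=("C", "H", "W"))
    assert _tensor.shape == (3, 32, 64)  # (C, H, W): channels first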
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and that can be loaded into an image tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load_pil(
self,
) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
return PILImage.open(BytesIO(self))
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> ImageNdArray:
"""
Load the image from the ImageBytes into an ImageNdArray
---
```python
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: ImageNdArray representing the image as RGB values
"""
raw_img = self.load_pil()
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
img = self._move_channel_axis(tensor, axis_layout=axis_layout)
return parse_obj_as(ImageNdArray, img)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.utils import (
DistanceStrategy,
filter_complex_metadata,
maximal_marginal_relevance,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DistanceStrategy": "langchain_community.vectorstores.utils",
"maximal_marginal_relevance": "langchain_community.vectorstores.utils",
"filter_complex_metadata": "langchain_community.vectorstores.utils",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DistanceStrategy",
"filter_complex_metadata",
"maximal_marginal_relevance",
]
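# Usage sketch (illustrative): importing one of the names above goes through
# `__getattr__`, which resolves the symbol from `langchain_community` and
# raises the appropriate deprecation warning, e.g. (module path hypothetical):
#
#   from langchain.vectorstores.utils import maximal_marginal_relevance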
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.utils import (
DistanceStrategy,
filter_complex_metadata,
maximal_marginal_relevance,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DistanceStrategy": "langchain_community.vectorstores.utils",
"maximal_marginal_relevance": "langchain_community.vectorstores.utils",
"filter_complex_metadata": "langchain_community.vectorstores.utils",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DistanceStrategy",
"maximal_marginal_relevance",
"filter_complex_metadata",
]
|
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
        super().__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
        super().__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from typing import ClassVar, Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.models.paligemma.modeling_paligemma import PaliGemmaForConditionalGeneration
from ...cache_utils import Cache
class NewTaskModelForNewTask(PaliGemmaForConditionalGeneration):
main_input_name: ClassVar[str] = "doc_input_ids" # transformers-related
def __init__(self, config):
super().__init__(config=config)
self.embedding_dim = self.config.embedding_dim
self.custom_text_proj = nn.Linear(self.config.text_config.hidden_size, self.embedding_dim)
if self.language_model._tied_weights_keys is not None:
self._tied_weights_keys = [f"model.language_model.{k}" for k in self.language_model._tied_weights_keys]
self.post_init()
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None,
token_type_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
num_logits_to_keep: int = 0,
):
r"""
Returns:
"""
vlm_outputs = super().forward(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
token_type_ids=token_type_ids,
cache_position=cache_position,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=True,
return_dict=True,
num_logits_to_keep=num_logits_to_keep,
)
last_hidden_states = vlm_outputs.hidden_states[-1] # (batch_size, sequence_length, hidden_size)
proj = self.custom_text_proj(last_hidden_states) # (batch_size, sequence_length, dim)
# L2 normalization
embeddings = proj / proj.norm(dim=-1, keepdim=True) # (batch_size, sequence_length, dim)
if attention_mask is not None:
embeddings = embeddings * attention_mask.unsqueeze(-1) # (batch_size, sequence_length, dim)
return (embeddings,) + vlm_outputs
def resize_token_embeddings(
self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None, mean_resizing=True
) -> nn.Embedding:
model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
# Update vocab size
self.config.text_config.vocab_size = model_embeds.num_embeddings
self.config.vocab_size = model_embeds.num_embeddings
self.vocab_size = model_embeds.num_embeddings
return model_embeds
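# Shape sketch (illustrative, not part of the original module): for a batch of
# B sequences of length L, `forward` returns multi-vector embeddings of shape
# (B, L, config.embedding_dim), L2-normalized per token and zeroed at padded
# positions, followed by the underlying PaliGemma outputs.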
|
from typing import ClassVar, Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.models.paligemma.modeling_paligemma import PaliGemmaForConditionalGeneration
from ...cache_utils import Cache
class NewTaskModelForNewTask(PaliGemmaForConditionalGeneration):
main_input_name: ClassVar[str] = "doc_input_ids" # transformers-related
def __init__(self, config):
super().__init__(config=config)
self.embedding_dim = self.config.embedding_dim
self.custom_text_proj = nn.Linear(self.config.text_config.hidden_size, self.embedding_dim)
if self.language_model._tied_weights_keys is not None:
self._tied_weights_keys = [f"model.language_model.{k}" for k in self.language_model._tied_weights_keys]
self.post_init()
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None,
token_type_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
num_logits_to_keep: int = 0,
):
r"""
Returns:
"""
vlm_outputs = super().forward(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
token_type_ids=token_type_ids,
cache_position=cache_position,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=True,
return_dict=True,
num_logits_to_keep=num_logits_to_keep,
)
last_hidden_states = vlm_outputs.hidden_states[-1] # (batch_size, sequence_length, hidden_size)
proj = self.custom_text_proj(last_hidden_states) # (batch_size, sequence_length, dim)
# L2 normalization
embeddings = proj / proj.norm(dim=-1, keepdim=True) # (batch_size, sequence_length, dim)
        if attention_mask is not None:
            embeddings = embeddings * attention_mask.unsqueeze(-1)  # (batch_size, sequence_length, dim)
return (embeddings,) + vlm_outputs
def resize_token_embeddings(
self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None, mean_resizing=True
) -> nn.Embedding:
model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
# Update vocab size
self.config.text_config.vocab_size = model_embeds.num_embeddings
self.config.vocab_size = model_embeds.num_embeddings
self.vocab_size = model_embeds.num_embeddings
return model_embeds
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__
@property
def allow_symlink(self):
return self._allow_symlink
prefix_to_backends = {'s3': MockPetrel}
class TestCheckpointHook:
@patch('mmengine.fileio.file_client.FileClient._prefix_to_backends',
prefix_to_backends)
def test_before_train(self, tmp_path):
runner = Mock()
work_dir = str(tmp_path)
runner.work_dir = work_dir
# the out_dir of the checkpoint hook is None
checkpoint_hook = CheckpointHook(interval=1, by_epoch=True)
checkpoint_hook.before_train(runner)
assert checkpoint_hook.out_dir == runner.work_dir
# the out_dir of the checkpoint hook is not None
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir')
checkpoint_hook.before_train(runner)
assert checkpoint_hook.out_dir == (
f'test_dir/{osp.basename(work_dir)}')
# create_symlink in args and create_symlink is True
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir', create_symlink=True)
checkpoint_hook.before_train(runner)
assert checkpoint_hook.args['create_symlink']
runner.work_dir = 's3://path/of/file'
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, create_symlink=True)
checkpoint_hook.before_train(runner)
assert not checkpoint_hook.args['create_symlink']
def test_after_train_epoch(self, tmp_path):
runner = Mock()
work_dir = str(tmp_path)
runner.work_dir = tmp_path
runner.epoch = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/epoch_10.pth')
        # epoch + 1 is not divisible by the interval, so no new checkpoint is saved
runner.epoch = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/epoch_10.pth')
# by epoch is False
runner.epoch = 9
runner.meta = dict()
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert runner.meta.get('hook_msgs', None) is None
# max_keep_ckpts > 0
runner.work_dir = work_dir
os.system(f'touch {work_dir}/epoch_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=True, max_keep_ckpts=1)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert not os.path.exists(f'{work_dir}/epoch_8.pth')
def test_after_train_iter(self, tmp_path):
work_dir = str(tmp_path)
runner = Mock()
runner.work_dir = str(work_dir)
runner.iter = 9
batch_idx = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert runner.meta.get('hook_msgs', None) is None
# by epoch is False
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert (runner.iter + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/iter_10.pth')
        # iter + 1 is not divisible by the interval, so the last checkpoint is unchanged
runner.iter = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/iter_10.pth')
# max_keep_ckpts > 0
runner.iter = 9
runner.work_dir = work_dir
os.system(f'touch {work_dir}/iter_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=False, max_keep_ckpts=1)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert not os.path.exists(f'{work_dir}/iter_8.pth')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
from tempfile import TemporaryDirectory
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
sys.modules['file_client'] = sys.modules['mmengine.fileio.file_client']
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__
@property
def allow_symlink(self):
return self._allow_symlink
prefix_to_backends = {'s3': MockPetrel}
class TestCheckpointHook:
@patch('file_client.FileClient._prefix_to_backends', prefix_to_backends)
def test_before_run(self):
runner = Mock()
runner.work_dir = './tmp'
# the out_dir of the checkpoint hook is None
checkpoint_hook = CheckpointHook(interval=1, by_epoch=True)
checkpoint_hook.before_run(runner)
assert checkpoint_hook.out_dir == runner.work_dir
# the out_dir of the checkpoint hook is not None
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir')
checkpoint_hook.before_run(runner)
assert checkpoint_hook.out_dir == 'test_dir/tmp'
# create_symlink in args and create_symlink is True
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir', create_symlink=True)
checkpoint_hook.before_run(runner)
assert checkpoint_hook.args['create_symlink']
runner.work_dir = 's3://path/of/file'
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, create_symlink=True)
checkpoint_hook.before_run(runner)
assert not checkpoint_hook.args['create_symlink']
def test_after_train_epoch(self):
runner = Mock()
runner.work_dir = './tmp'
runner.epoch = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/epoch_10.pth'
        # epoch + 1 is not divisible by the interval, so no new checkpoint is saved
runner.epoch = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/epoch_10.pth'
# by epoch is False
runner.epoch = 9
runner.meta = dict()
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert runner.meta.get('hook_msgs', None) is None
# max_keep_ckpts > 0
with TemporaryDirectory() as tempo_dir:
runner.work_dir = tempo_dir
os.system(f'touch {tempo_dir}/epoch_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=True, max_keep_ckpts=1)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert not os.path.exists(f'{tempo_dir}/epoch_8.pth')
def test_after_train_iter(self):
runner = Mock()
runner.work_dir = './tmp'
runner.iter = 9
batch_idx = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert runner.meta.get('hook_msgs', None) is None
# by epoch is False
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert (runner.iter + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/iter_10.pth'
        # iter + 1 is not divisible by the interval, so the last checkpoint is unchanged
runner.iter = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == './tmp/iter_10.pth'
# max_keep_ckpts > 0
runner.iter = 9
with TemporaryDirectory() as tempo_dir:
runner.work_dir = tempo_dir
os.system(f'touch {tempo_dir}/iter_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=False, max_keep_ckpts=1)
checkpoint_hook.before_run(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert not os.path.exists(f'{tempo_dir}/iter_8.pth')
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((3, 224, 224), dtype=np.float32)),
(np.ones((3, 100, 100), dtype=np.float32)),
(np.ones((3, 50, 40), dtype=np.float32)),
],
)
def test_paddle_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True,
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (2048,)
def test_paddle_batch():
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((3, 224, 224), dtype=np.float32))
for _ in range(25)
),
return_results=True,
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (2048,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str
):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True,
)
for path, count in docs_per_path:
embeddings = (
DocumentArray(results[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
assert len([em for em in embeddings if em is not None]) == count
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((3, 224, 224), dtype=np.float32)),
(np.ones((3, 100, 100), dtype=np.float32)),
(np.ones((3, 50, 40), dtype=np.float32))
])
def test_paddle_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (2048,)
def test_paddle_batch():
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(Document(blob=np.ones((3, 224, 224), dtype=np.float32)) for _ in range(25)),
return_results=True
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (2048,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [['r', 0], ['c', 10], ['cc', 0]], 'c'),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [['r', 0], ['c', 0], ['cc', 10]], 'cc')
]
)
def test_traversal_path(docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True
)
for path, count in docs_per_path:
assert len(DocumentArray(results[0].docs).traverse_flat([path]).get_attributes('embedding')) == count
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
|
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
from backend.data.execution import ExecutionResult
from backend.util.settings import Config
logger = logging.getLogger(__name__)
config = Config()
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}-{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _subscribe(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
channel_name = f"{self.event_bus_name}-{channel_key}"
pubsub = connection.pubsub()
return pubsub, channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
self.connection.publish(channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, channel_name = self._subscribe(self.connection, channel_key)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(channel_name)
else:
pubsub.subscribe(channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, channel_name = self._subscribe(await self.connection, channel_key)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(channel_name)
else:
await pubsub.subscribe(channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class RedisExecutionEventBus(RedisEventBus[ExecutionResult]):
Model = ExecutionResult
@property
def event_bus_name(self) -> str:
return config.execution_event_bus_name
def publish(self, res: ExecutionResult):
self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
def listen(
self, graph_id: str = "*", graph_exec_id: str = "*"
) -> Generator[ExecutionResult, None, None]:
for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
yield execution_result
class AsyncRedisExecutionEventBus(AsyncRedisEventBus[ExecutionResult]):
Model = ExecutionResult
@property
def event_bus_name(self) -> str:
return config.execution_event_bus_name
async def publish(self, res: ExecutionResult):
await self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
async def listen(
self, graph_id: str = "*", graph_exec_id: str = "*"
) -> AsyncGenerator[ExecutionResult, None]:
async for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
yield execution_result
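# Usage sketch (illustrative, not part of the original module; assumes a
# reachable Redis instance and an `ExecutionResult` value `res` produced
# elsewhere):
#
#   bus = RedisExecutionEventBus()
#   bus.publish(res)  # published on the "<graph_id>-<graph_exec_id>" channel
#   for event in bus.listen(graph_id=res.graph_id):
#       print(event)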
|
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from backend.data import redis
from backend.data.execution import ExecutionResult
logger = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
class AbstractEventQueue(ABC):
@abstractmethod
def put(self, execution_result: ExecutionResult):
pass
@abstractmethod
def get(self) -> ExecutionResult | None:
pass
class RedisEventQueue(AbstractEventQueue):
def __init__(self):
self.queue_name = redis.QUEUE_NAME
@property
def connection(self):
return redis.get_redis()
def put(self, execution_result: ExecutionResult):
message = json.dumps(execution_result.model_dump(), cls=DateTimeEncoder)
logger.info(f"Putting execution result to Redis {message}")
self.connection.lpush(self.queue_name, message)
def get(self) -> ExecutionResult | None:
message = self.connection.rpop(self.queue_name)
if message is not None and isinstance(message, (str, bytes, bytearray)):
data = json.loads(message)
logger.info(f"Getting execution result from Redis {data}")
return ExecutionResult(**data)
elif message is not None:
logger.error(f"Failed to get execution result from Redis {message}")
return None
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
(1333, 608), (1333, 640), (1333, 672), (1333, 704),
(1333, 736), (1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='ConcatDataset',
datasets=[
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
]))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 18k
max_iter = 18000
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=3000)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=100),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[12000, 16000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=3000))
log_processor = dict(by_epoch=False)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
(1333, 608), (1333, 640), (1333, 672), (1333, 704),
(1333, 736), (1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='ConcatDataset',
datasets=[
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
]))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 18k
max_iter = 18000
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=3000)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=100),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[12000, 16000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=3000))
log_processor = dict(by_epoch=False)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize'
]
|
"""Math utils."""
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
logger = logging.getLogger(__name__)
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError(
f"Number of columns in X and Y must be the same. X has shape {X.shape} "
f"and Y has shape {Y.shape}."
)
try:
import simsimd as simd
X = np.array(X, dtype=np.float32)
Y = np.array(Y, dtype=np.float32)
Z = 1 - np.array(simd.cdist(X, Y, metric="cosine"))
return Z
except ImportError:
logger.debug(
"Unable to import simsimd, defaulting to NumPy implementation. If you want "
"to use simsimd please install with `pip install simsimd`."
)
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
        # Ignore divide-by-zero and invalid-value runtime warnings; those cases are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
def cosine_similarity_top_k(
X: Matrix,
Y: Matrix,
top_k: Optional[int] = 5,
score_threshold: Optional[float] = None,
) -> Tuple[List[Tuple[int, int]], List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores # type: ignore[return-value]
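# --- Illustrative usage sketch (added for clarity, not part of the module):
# a quick sanity check of cosine_similarity and cosine_similarity_top_k.
if __name__ == "__main__":
    X = [[1.0, 0.0], [0.0, 1.0]]
    Y = [[1.0, 0.0], [1.0, 1.0]]
    sims = cosine_similarity(X, Y)  # sims[i, j] = cos(X[i], Y[j])
    print(sims)  # [[1.0, ~0.7071], [0.0, ~0.7071]]
    idxs, scores = cosine_similarity_top_k(X, Y, top_k=2)
    print(idxs, scores)  # best (X_idx, Y_idx) pairs, highest similarity first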
|
"""Math utils."""
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
logger = logging.getLogger(__name__)
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError(
f"Number of columns in X and Y must be the same. X has shape {X.shape} "
f"and Y has shape {Y.shape}."
)
try:
import simsimd as simd
X = np.array(X, dtype=np.float32)
Y = np.array(Y, dtype=np.float32)
Z = 1 - np.array(simd.cdist(X, Y, metric="cosine"))
return Z
except ImportError:
logger.debug(
"Unable to import simsimd, defaulting to NumPy implementation. If you want "
"to use simsimd please install with `pip install simsimd`."
)
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
    # Ignore divide-by-zero and invalid-value runtime warnings; those cases are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
def cosine_similarity_top_k(
X: Matrix,
Y: Matrix,
top_k: Optional[int] = 5,
score_threshold: Optional[float] = None,
) -> Tuple[List[Tuple[int, int]], List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores # type: ignore
|
import multiprocessing
import pytest
from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer
class StreamerTestExecutor(Executor):
@requests
def foo(self, docs, parameters, **kwargs):
text_to_add = parameters.get('text_to_add', 'default ')
for doc in docs:
doc.text += text_to_add
def _create_worker_runtime(port, name=''):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
args.uses = 'StreamerTestExecutor'
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _setup(pod0_port, pod1_port):
pod0_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod0_port,)
)
pod0_process.start()
pod1_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod1_port,)
)
pod1_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod0_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod1_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return pod0_process, pod1_process
@pytest.mark.parametrize(
'parameters, target_executor, expected_text',
[ # (None, None, 'default default '),
({'pod0__text_to_add': 'param_pod0 '}, None, 'param_pod0 default '),
(None, 'pod1', 'default '),
({'pod0__text_to_add': 'param_pod0 '}, 'pod0', 'param_pod0 '),
],
)
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.asyncio
async def test_custom_gateway(
port_generator, parameters, target_executor, expected_text, results_in_order
):
pod0_port = port_generator()
pod1_port = port_generator()
pod0_process, pod1_process = _setup(pod0_port, pod1_port)
graph_description = {
"start-gateway": ["pod0"],
"pod0": ["pod1"],
"pod1": ["end-gateway"],
}
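    # Linear topology: gateway -> pod0 -> pod1 -> gateway.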
pod_addresses = {"pod0": [f"0.0.0.0:{pod0_port}"], "pod1": [f"0.0.0.0:{pod1_port}"]}
# send requests to the gateway
gateway_streamer = GatewayStreamer(
graph_representation=graph_description, executor_addresses=pod_addresses
)
try:
input_da = DocumentArray.empty(60)
resp = DocumentArray.empty(0)
num_resp = 0
async for r in gateway_streamer.stream_docs(
docs=input_da,
request_size=10,
parameters=parameters,
target_executor=target_executor,
results_in_order=results_in_order,
):
num_resp += 1
resp.extend(r)
assert num_resp == 6
assert len(resp) == 60
for doc in resp:
assert doc.text == expected_text
finally: # clean up runtimes
pod0_process.terminate()
pod1_process.terminate()
pod0_process.join()
pod1_process.join()
await gateway_streamer.close()
|
import multiprocessing
import pytest
from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer
class StreamerTestExecutor(Executor):
@requests
def foo(self, docs, parameters, **kwargs):
text_to_add = parameters.get('text_to_add', 'default ')
for doc in docs:
doc.text += text_to_add
def _create_worker_runtime(port, name=''):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
args.uses = 'StreamerTestExecutor'
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _setup(pod0_port, pod1_port):
pod0_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod0_port,)
)
pod0_process.start()
pod1_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod1_port,)
)
pod1_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod0_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod1_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return pod0_process, pod1_process
@pytest.mark.parametrize(
'parameters, target_executor, expected_text',
[ # (None, None, 'default default '),
({'pod0__text_to_add': 'param_pod0 '}, None, 'param_pod0 default '),
(None, 'pod1', 'default '),
({'pod0__text_to_add': 'param_pod0 '}, 'pod0', 'param_pod0 '),
],
)
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.asyncio
async def test_custom_gateway(
port_generator, parameters, target_executor, expected_text, results_in_order
):
pod0_port = port_generator()
pod1_port = port_generator()
pod0_process, pod1_process = _setup(pod0_port, pod1_port)
graph_description = {
"start-gateway": ["pod0"],
"pod0": ["pod1"],
"pod1": ["end-gateway"],
}
pod_addresses = {"pod0": [f"0.0.0.0:{pod0_port}"], "pod1": [f"0.0.0.0:{pod1_port}"]}
# send requests to the gateway
gateway_streamer = GatewayStreamer(
graph_representation=graph_description, executor_addresses=pod_addresses
)
try:
input_da = DocumentArray.empty(60)
resp = DocumentArray.empty(0)
num_resp = 0
async for r in gateway_streamer.stream_docs(
docs=input_da,
request_size=10,
parameters=parameters,
target_executor=target_executor,
results_in_order=results_in_order
):
num_resp += 1
resp.extend(r)
assert num_resp == 6
assert len(resp) == 60
for doc in resp:
assert doc.text == expected_text
finally: # clean up runtimes
pod0_process.terminate()
pod1_process.terminate()
pod0_process.join()
pod1_process.join()
await gateway_streamer.close()
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
Bytes that store an audio and that can be load into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
        Load the audio bytes into a numpy.ndarray audio tensor
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioUrl, NdArray, AudioBytes
import numpy as np
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[NdArray]
bytes: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes.load()
# Note this is equivalent to do
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, np.ndarray)
```
---
        :return: tuple of an np.ndarray of audio samples and the frame rate in Hz
"""
if TYPE_CHECKING:
import pydub
else:
pydub = import_library('pydub', raise_error=True)
segment = pydub.AudioSegment.from_file(io.BytesIO(self))
        # Read the raw samples into a NumPy array
samples = np.array(segment.get_array_of_samples())
        # Normalise the samples so that values lie between -1.0 and +1.0
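        # For example, 16-bit audio has sample_width == 2, so each sample is
        # divided by 2 ** 15 == 32768 (illustration, not from the original source).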
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
Bytes that store an audio and that can be load into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
        Load the audio bytes into a numpy.ndarray audio tensor
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioUrl, NdArray, AudioBytes
import numpy as np
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[NdArray]
bytes: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes.load()
# Note this is equivalent to do
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, np.ndarray)
```
---
        :return: tuple of an np.ndarray of audio samples and the frame rate in Hz
"""
from pydub import AudioSegment # type: ignore
segment = AudioSegment.from_file(io.BytesIO(self))
        # Read the raw samples into a NumPy array
samples = np.array(segment.get_array_of_samples())
        # Normalise the samples so that values lie between -1.0 and +1.0
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
|
"""Base interfaces for tracing runs."""
from langchain_core.exceptions import TracerException
from langchain_core.tracers.base import BaseTracer
__all__ = ["BaseTracer", "TracerException"]
|
"""Base interfaces for tracing runs."""
from langchain_core.tracers.base import BaseTracer, TracerException
__all__ = ["BaseTracer", "TracerException"]
|
import asyncio
import copy
from typing import Any, List
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
# ignore metrics_registry since it is not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ['metrics_registry']
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(**gateway_kwargs)
gateway.streamer = self.streamer
self.gateways.append(gateway)
async def setup_server(self):
"""
setup GRPC server
"""
tasks = []
for gateway in self.gateways:
tasks.append(asyncio.create_task(gateway.setup_server()))
await asyncio.gather(*tasks)
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
shutdown_tasks = []
for gateway in self.gateways:
shutdown_tasks.append(asyncio.create_task(gateway.shutdown()))
await asyncio.gather(*shutdown_tasks)
async def run_server(self):
"""Run GRPC server forever"""
run_server_tasks = []
for gateway in self.gateways:
run_server_tasks.append(asyncio.create_task(gateway.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
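# --- Illustrative sketch (added for clarity; `_Args` is hypothetical, not a
# jina class): the deepcopy-memo trick behind _deepcopy_with_ignore_attrs.
# Pre-seeding memo[id(attr)] = None makes copy.deepcopy substitute None for
# that attribute instead of copying it.
if __name__ == '__main__':
    class _Args:
        def __init__(self):
            self.port = [8080]
            self.metrics_registry = object()  # stands in for a non-copyable object
    args = _Args()
    memo = {id(args.metrics_registry): None}
    copied = copy.deepcopy(args, memo)
    assert copied.metrics_registry is None  # ignored attribute replaced by None
    assert copied.port == [8080]  # everything else is deep-copied as usual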
|
import asyncio
import copy
from typing import Any, List
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
# ignore metrics_registry since it is not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ['metrics_registry']
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(**gateway_kwargs)
self.gateways.append(gateway)
async def setup_server(self):
"""
setup GRPC server
"""
tasks = []
for gateway in self.gateways:
tasks.append(asyncio.create_task(gateway.setup_server()))
await asyncio.gather(*tasks)
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
shutdown_tasks = []
for gateway in self.gateways:
shutdown_tasks.append(asyncio.create_task(gateway.shutdown()))
await asyncio.gather(*shutdown_tasks)
async def run_server(self):
"""Run GRPC server forever"""
run_server_tasks = []
for gateway in self.gateways:
run_server_tasks.append(asyncio.create_task(gateway.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A Recursive Feature Elimination (RFE) example with automatic tuning of the
number of features selected with cross-validation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We build a classification task using 3 informative features. The introduction
# of 2 additional redundant (i.e. correlated) features has the effect that the
# selected features vary depending on the cross-validation fold. The remaining
# features are non-informative as they are drawn at random.
from sklearn.datasets import make_classification
n_features = 15
feat_names = [f"feature_{i}" for i in range(15)]
X, y = make_classification(
n_samples=500,
n_features=n_features,
n_informative=3,
n_redundant=2,
n_repeated=0,
n_classes=8,
n_clusters_per_class=1,
class_sep=0.8,
random_state=0,
)
# %%
# Model training and selection
# ----------------------------
#
# We create the RFE object and compute the cross-validated scores. The scoring
# strategy "accuracy" optimizes the proportion of correctly classified samples.
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
min_features_to_select = 1 # Minimum number of features to consider
clf = LogisticRegression()
cv = StratifiedKFold(5)
rfecv = RFECV(
estimator=clf,
step=1,
cv=cv,
scoring="accuracy",
min_features_to_select=min_features_to_select,
n_jobs=2,
)
rfecv.fit(X, y)
print(f"Optimal number of features: {rfecv.n_features_}")
# %%
# In the present case, the model with 3 features (which corresponds to the true
# generative model) is found to be optimal.
#
# Plot number of features VS. cross-validation scores
# ---------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
data = {
key: value
for key, value in rfecv.cv_results_.items()
if key in ["n_features", "mean_test_score", "std_test_score"]
}
cv_results = pd.DataFrame(data)
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Mean test accuracy")
plt.errorbar(
x=cv_results["n_features"],
y=cv_results["mean_test_score"],
yerr=cv_results["std_test_score"],
)
plt.title("Recursive Feature Elimination \nwith correlated features")
plt.show()
# %%
# From the plot above one can further notice a plateau of equivalent scores
# (similar mean value and overlapping errorbars) for 3 to 5 selected features.
# This is the result of introducing correlated features. Indeed, the optimal
# model selected by the RFE can lie within this range, depending on the
# cross-validation technique. The test accuracy decreases above 5 selected
# features; that is, keeping non-informative features leads to over-fitting
# and is therefore detrimental to the statistical performance of the models.
# %%
import numpy as np
for i in range(cv.n_splits):
mask = rfecv.cv_results_[f"split{i}_support"][
rfecv.n_features_
] # mask of features selected by the RFE
features_selected = np.ma.compressed(np.ma.masked_array(feat_names, mask=1 - mask))
print(f"Features selected in fold {i}: {features_selected}")
# %%
# In the five folds, the selected features are consistent. This is good news:
# it means that the selection is stable across folds, and it confirms that
# these features are the most informative ones.
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A Recursive Feature Elimination (RFE) example with automatic tuning of the
number of features selected with cross-validation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We build a classification task using 3 informative features. The introduction
# of 2 additional redundant (i.e. correlated) features has the effect that the
# selected features vary depending on the cross-validation fold. The remaining
# features are non-informative as they are drawn at random.
from sklearn.datasets import make_classification
n_features = 15
feat_names = [f"feature_{i}" for i in range(15)]
X, y = make_classification(
n_samples=500,
n_features=n_features,
n_informative=3,
n_redundant=2,
n_repeated=0,
n_classes=8,
n_clusters_per_class=1,
class_sep=0.8,
random_state=0,
)
# %%
# Model training and selection
# ----------------------------
#
# We create the RFE object and compute the cross-validated scores. The scoring
# strategy "accuracy" optimizes the proportion of correctly classified samples.
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
min_features_to_select = 1 # Minimum number of features to consider
clf = LogisticRegression()
cv = StratifiedKFold(5)
rfecv = RFECV(
estimator=clf,
step=1,
cv=cv,
scoring="accuracy",
min_features_to_select=min_features_to_select,
n_jobs=2,
)
rfecv.fit(X, y)
print(f"Optimal number of features: {rfecv.n_features_}")
# %%
# In the present case, the model with 3 features (which corresponds to the true
# generative model) is found to be optimal.
#
# Plot number of features VS. cross-validation scores
# ---------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
data = {
key: value
for key, value in rfecv.cv_results_.items()
if key in ["n_features", "mean_test_score", "std_test_score"]
}
cv_results = pd.DataFrame(data)
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Mean test accuracy")
plt.errorbar(
x=cv_results["n_features"],
y=cv_results["mean_test_score"],
yerr=cv_results["std_test_score"],
)
plt.title("Recursive Feature Elimination \nwith correlated features")
plt.show()
# %%
# From the plot above one can further notice a plateau of equivalent scores
# (similar mean value and overlapping errorbars) for 3 to 5 selected features.
# This is the result of introducing correlated features. Indeed, the optimal
# model selected by the RFE can lie within this range, depending on the
# cross-validation technique. The test accuracy decreases above 5 selected
# features; that is, keeping non-informative features leads to over-fitting
# and is therefore detrimental to the statistical performance of the models.
# %%
import numpy as np
for i in range(cv.n_splits):
mask = rfecv.cv_results_[f"split{i}_support"][
rfecv.n_features_
] # mask of features selected by the RFE
features_selected = np.ma.compressed(np.ma.masked_array(feat_names, mask=1 - mask))
print(f"Features selected in fold {i}: {features_selected}")
# %%
# In the five folds, the selected features are consistent. This is good news:
# it means that the selection is stable across folds, and it confirms that
# these features are the most informative ones.
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
with open(input_json_file, encoding="utf-8") as f:
results = json.load(f)
output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(results):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split("/")[-1]
output_md.append(f"### Benchmark: {benchmark_file_name}")
title = "| metric |"
lines = "|--------|"
value = "| new / old (diff) |"
for metric_name in sorted(benchmark_res):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals["new"]
old_val = metric_vals.get("old", None)
dif_val = metric_vals.get("diff", None)
val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
if old_val is not None:
val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
if dif_val is not None:
val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>")
with open(output_md_file, "w", encoding="utf-8") as f:
        f.write("\n".join(output_md))
if __name__ == "__main__":
input_json_file = sys.argv[1]
output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
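# Worked example (derived from the logic above, not from an actual run): for a
# results file containing
#   {"benchmarks/bench_a.json": {"time": {"new": 1.0, "old": 2.0, "diff": -1.0}}}
# the generated markdown is:
#   <details>
#   <summary>Show updated benchmarks!</summary>
#
#   ### Benchmark: bench_a.json
#   | metric | time |
#   |--------|---|
#   | new / old (diff) | 1.000000 / 2.000000 (-1.000000) |
#
#   </details>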
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
with open(input_json_file, encoding="utf-8") as f:
results = json.load(f)
output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(results):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split("/")[-1]
output_md.append(f"### Benchmark: {benchmark_file_name}")
title = "| metric |"
lines = "|--------|"
value = "| new / old (diff) |"
for metric_name in sorted(benchmark_res):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals["new"]
old_val = metric_vals.get("old", None)
dif_val = metric_vals.get("diff", None)
val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
if old_val is not None:
val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
if dif_val is not None:
val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>")
with open(output_md_file, "w", encoding="utf-8") as f:
        f.write("\n".join(output_md))
if __name__ == "__main__":
input_json_file = sys.argv[1]
output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
|
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class IdentityTest(testing.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_identity_basics(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
self.run_layer_test(
layers.Identity,
init_kwargs={},
input_shape=(2, 3),
input_sparse=sparse,
expected_output_shape=(2, 3),
expected_output_sparse=sparse,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
run_training_check=not sparse,
supports_masking=True,
assert_built_after_instantiation=True,
)
|
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class IdentityTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_identity_basics(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
self.run_layer_test(
layers.Identity,
init_kwargs={},
input_shape=(2, 3),
input_sparse=sparse,
expected_output_shape=(2, 3),
expected_output_sparse=sparse,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
run_training_check=not sparse,
supports_masking=True,
assert_built_after_instantiation=True,
)
|
def check_health_pod(addr: str):
"""check if a pods is healthy
:param addr: the address on which the pod is serving ex : localhost:1234
"""
from jina.serve.runtimes.servers import BaseServer
is_ready = BaseServer.is_ready(addr)
if not is_ready:
raise Exception('Pod is unhealthy')
print('The Pod is healthy')
if __name__ == '__main__':
"""
Health check cli (for docker):
Example:
python jina.resources.health_check.pod localhost:1234
"""
import sys
if len(sys.argv) < 2:
        raise ValueError('You need to specify an address to check health')
addr = sys.argv[1]
check_health_pod(addr)
|
def check_health_pod(addr: str):
"""check if a pods is healthy
:param addr: the address on which the pod is serving ex : localhost:1234
"""
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
is_ready = AsyncNewLoopRuntime.is_ready(addr)
if not is_ready:
raise Exception('Pod is unhealthy')
print('The Pod is healthy')
if __name__ == '__main__':
"""
Health check cli (for docker):
Example:
python jina.resources.health_check.pod localhost:1234
"""
import sys
if len(sys.argv) < 2:
        raise ValueError('You need to specify an address to check health')
addr = sys.argv[1]
check_health_pod(addr)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.2.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from executor.audioclip_image import AudioCLIPImageEncoder
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...audioclip_image import AudioCLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
|
"""Lilac reader that loads enriched and labeled Lilac datasets into GPTIndex and LangChain."""
from typing import TYPE_CHECKING, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
if TYPE_CHECKING:
from lilac import ColumnId, FilterLike, Path
class LilacReader(BaseReader):
"""
Lilac dataset reader.
"""
def load_data(
self,
dataset: str,
text_path: "Path" = "text",
doc_id_path: Optional["Path"] = "doc_id",
columns: Optional[List["ColumnId"]] = None,
filters: Optional[List["FilterLike"]] = None,
project_dir: Optional[str] = None,
) -> List[Document]:
"""
        Load text from a Lilac dataset into a list of documents.
Args:
project_dir (Optional[str]): The Lilac project dir to read from. If not defined, uses the `LILAC_PROJECT_DIR`
environment variable.
text_path: The path to the text field in the dataset. If not defined, uses 'text'.
columns (Optional[List[ColumnId]]): The columns to load from the dataset. If not defined, loads all columns.
dataset (str): The dataset to load. Should be formatted like {namespace}/{dataset_name}.
filters (Optional[Filter]): A filter to apply to the dataset before loading into documents. Useful to filter
for labeled data.
"""
try:
import lilac as ll
except ImportError:
raise ("`lilac` package not found, please run `pip install lilac`")
namespace, dataset_name = dataset.split("/")
lilac_dataset = ll.get_dataset(namespace, dataset_name, project_dir=project_dir)
# Check to make sure text path, and doc_id path are valid.
manifest = lilac_dataset.manifest()
text_path = ll.normalize_path(text_path)
text_field = manifest.data_schema.get_field(text_path)
if not text_field:
raise ValueError(
f"Could not find text field {text_path} in dataset {dataset}"
)
doc_id_path = ll.normalize_path(doc_id_path)
doc_id_field = manifest.data_schema.get_field(doc_id_path)
if not doc_id_field:
raise ValueError(
f"Could not find doc_id field {doc_id_path} in dataset {dataset}"
)
rows = lilac_dataset.select_rows(
columns=([*columns, text_field, doc_id_path]) if columns else ["*"],
filters=filters,
combine_columns=True,
)
def _item_from_path(item: ll.Item, path: ll.PathTuple) -> ll.Item:
if len(path) == 1:
item = item[path[0]]
if isinstance(item, dict):
return item[ll.VALUE_KEY]
else:
return item
else:
return _item_from_path(item[path[0]], path[1:])
def _remove_item_path(item: ll.Item, path: ll.PathTuple) -> None:
if len(path) == 0:
return
if len(path) == 1:
if item and path[0] in item:
leaf_item = item[path[0]]
if isinstance(leaf_item, dict):
del item[path[0]][ll.VALUE_KEY]
else:
del item[path[0]]
return
else:
_remove_item_path(item[path[0]], path[1:])
documents: List[Document] = []
for row in rows:
text = _item_from_path(row, text_path)
doc_id = _item_from_path(row, doc_id_path)
_remove_item_path(row, text_path)
_remove_item_path(row, doc_id_path)
documents.append(Document(text=text, doc_id=doc_id, extra_info=row or {}))
return documents
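# --- Illustrative sketch (hypothetical data; VALUE_KEY stands in for
# ll.VALUE_KEY): how the recursive _item_from_path helper above resolves a
# path tuple against a nested row, unwrapping leaf dicts.
if __name__ == "__main__":
    VALUE_KEY = "__value__"
    row = {"doc": {"text": {VALUE_KEY: "hello"}}}
    def _demo_item_from_path(item, path):
        if len(path) == 1:
            item = item[path[0]]
            return item[VALUE_KEY] if isinstance(item, dict) else item
        return _demo_item_from_path(item[path[0]], path[1:])
    assert _demo_item_from_path(row, ("doc", "text")) == "hello"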
|
"""Lilac reader that loads enriched and labeled Lilac datasets into GPTIndex and LangChain."""
from typing import TYPE_CHECKING, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
if TYPE_CHECKING:
from lilac import ColumnId, FilterLike, Path
class LilacReader(BaseReader):
"""
Lilac dataset reader.
"""
def load_data(
self,
dataset: str,
text_path: "Path" = "text",
doc_id_path: Optional["Path"] = "doc_id",
columns: Optional[List["ColumnId"]] = None,
filters: Optional[List["FilterLike"]] = None,
project_dir: Optional[str] = None,
) -> List[Document]:
"""
        Load text from a Lilac dataset into a list of documents.
Args:
project_dir (Optional[str]): The Lilac project dir to read from. If not defined, uses the `LILAC_PROJECT_DIR`
environment variable.
text_path: The path to the text field in the dataset. If not defined, uses 'text'.
columns (Optional[List[ColumnId]]): The columns to load from the dataset. If not defined, loads all columns.
dataset (str): The dataset to load. Should be formatted like {namespace}/{dataset_name}.
filters (Optional[Filter]): A filter to apply to the dataset before loading into documents. Useful to filter
for labeled data.
"""
try:
import lilac as ll
except ImportError:
raise ("`lilac` package not found, please run `pip install lilac`")
namespace, dataset_name = dataset.split("/")
lilac_dataset = ll.get_dataset(namespace, dataset_name, project_dir=project_dir)
# Check to make sure text path, and doc_id path are valid.
manifest = lilac_dataset.manifest()
text_path = ll.normalize_path(text_path)
text_field = manifest.data_schema.get_field(text_path)
if not text_field:
raise ValueError(
f"Could not find text field {text_path} in dataset {dataset}"
)
doc_id_path = ll.normalize_path(doc_id_path)
doc_id_field = manifest.data_schema.get_field(doc_id_path)
if not doc_id_field:
raise ValueError(
f"Could not find doc_id field {doc_id_path} in dataset {dataset}"
)
rows = lilac_dataset.select_rows(
columns=([*columns, text_field, doc_id_path]) if columns else ["*"],
filters=filters,
combine_columns=True,
)
def _item_from_path(item: ll.Item, path: ll.PathTuple) -> ll.Item:
if len(path) == 1:
item = item[path[0]]
if isinstance(item, dict):
return item[ll.VALUE_KEY]
else:
return item
else:
return _item_from_path(item[path[0]], path[1:])
def _remove_item_path(item: ll.Item, path: ll.PathTuple) -> None:
if len(path) == 0:
return
if len(path) == 1:
if item and path[0] in item:
leaf_item = item[path[0]]
if isinstance(leaf_item, dict):
del item[path[0]][ll.VALUE_KEY]
else:
del item[path[0]]
return
else:
_remove_item_path(item[path[0]], path[1:])
documents: List[Document] = []
for row in rows:
text = _item_from_path(row, text_path)
doc_id = _item_from_path(row, doc_id_path)
_remove_item_path(row, text_path)
_remove_item_path(row, doc_id_path)
documents.append(Document(text=text, doc_id=doc_id, extra_info=row or {}))
return documents
|
_base_ = './retinanet_r50_fpn_1x_coco.py'
# use caffe img_norm
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive samples
for each instance."""
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
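            # Oversample by one per instance so that, after capping each
            # instance at num_per_gt, enough candidates remain; any excess
            # over num_expected is trimmed below and any shortfall is topped
            # up from the leftover positives.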
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero(
assign_result.gt_inds == i.item(), as_tuple=False)
if inds.numel() != 0:
inds = inds.squeeze(1)
else:
continue
if len(inds) > num_per_gt:
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(
assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif len(sampled_inds) > num_expected:
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
|
import numpy as np
import torch
from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive samples
for each instance."""
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero(
assign_result.gt_inds == i.item(), as_tuple=False)
if inds.numel() != 0:
inds = inds.squeeze(1)
else:
continue
if len(inds) > num_per_gt:
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(
assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif len(sampled_inds) > num_expected:
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import Html2TextTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Html2TextTransformer": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Html2TextTransformer",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import Html2TextTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Html2TextTransformer": "langchain_community.document_transformers"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Html2TextTransformer",
]
|
from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class CloseSessionSchema(BaseModel):
"""Input for UpdateSessionTool."""
sessionId: str = Field(
...,
description="""The sessionId, received from one of the createSessions
or updateSessions run before""",
)
class MultionCloseSession(BaseTool):
"""Tool that closes an existing Multion Browser Window with provided fields.
Attributes:
name: The name of the tool. Default: "close_multion_session"
description: The description of the tool.
        args_schema: The schema for the tool's arguments. Default: CloseSessionSchema
"""
name: str = "close_multion_session"
description: str = """Use this tool to close \
an existing corresponding Multion Browser Window with provided fields. \
Note: SessionId must be received from previous Browser window creation."""
args_schema: Type[CloseSessionSchema] = CloseSessionSchema
sessionId: str = ""
def _run(
self,
sessionId: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> None:
try:
try:
multion.close_session(sessionId)
except Exception as e:
print(f"{e}, retrying...") # noqa: T201
except Exception as e:
raise Exception(f"An error occurred: {e}")
|
from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class CloseSessionSchema(BaseModel):
"""Input for UpdateSessionTool."""
sessionId: str = Field(
...,
description="""The sessionId, received from one of the createSessions
or updateSessions run before""",
)
class MultionCloseSession(BaseTool): # type: ignore[override, override]
"""Tool that closes an existing Multion Browser Window with provided fields.
Attributes:
name: The name of the tool. Default: "close_multion_session"
description: The description of the tool.
        args_schema: The schema for the tool's arguments. Default: CloseSessionSchema
"""
name: str = "close_multion_session"
description: str = """Use this tool to close \
an existing corresponding Multion Browser Window with provided fields. \
Note: SessionId must be received from previous Browser window creation."""
args_schema: Type[CloseSessionSchema] = CloseSessionSchema
sessionId: str = ""
def _run(
self,
sessionId: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> None:
try:
try:
multion.close_session(sessionId)
except Exception as e:
print(f"{e}, retrying...") # noqa: T201
except Exception as e:
raise Exception(f"An error occurred: {e}")
|
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers import SentenceTransformer
from typing import List, Optional, Tuple, Dict
import numpy as np
import logging
import os
import csv
logger = logging.getLogger(__name__)
class MSEEvaluatorFromDataFrame(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding and some target sentence embedding.
    :param dataframe: It must have the following format. Rows contain different, parallel sentences.
Columns are the respective language codes::
[{'en': 'My sentence in English', 'es': 'Oración en español', 'fr': 'Phrase en français'...},
{'en': 'My second sentence', ...}]
:param combinations: Must be of the format ``[('en', 'es'), ('en', 'fr'), ...]``.
        First entry in a tuple is the source language. The sentence in the respective language will be fetched from
        the dataframe and passed to the teacher model. The second entry in a tuple is the target language; that
        sentence will be fetched from the dataframe and passed to the student model.
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
dataframe: List[Dict[str, str]],
teacher_model: SentenceTransformer,
combinations: List[Tuple[str, str]],
batch_size: int = 8,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
self.combinations = combinations
self.name = name
self.batch_size = batch_size
if name:
name = "_" + name
self.csv_file = "mse_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps"]
self.write_csv = write_csv
self.truncate_dim = truncate_dim
self.data = {}
logger.info("Compute teacher embeddings")
all_source_sentences = set()
for src_lang, trg_lang in self.combinations:
src_sentences = []
trg_sentences = []
for row in dataframe:
if row[src_lang].strip() != "" and row[trg_lang].strip() != "":
all_source_sentences.add(row[src_lang])
src_sentences.append(row[src_lang])
trg_sentences.append(row[trg_lang])
self.data[(src_lang, trg_lang)] = (src_sentences, trg_sentences)
self.csv_headers.append("{}-{}".format(src_lang, trg_lang))
all_source_sentences = list(all_source_sentences)
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
all_src_embeddings = teacher_model.encode(all_source_sentences, batch_size=self.batch_size)
self.teacher_embeddings = {sent: emb for sent, emb in zip(all_source_sentences, all_src_embeddings)}
    def __call__(self, model: SentenceTransformer, output_path: Optional[str] = None, epoch: int = -1, steps: int = -1):
model.eval()
mse_scores = []
for src_lang, trg_lang in self.combinations:
src_sentences, trg_sentences = self.data[(src_lang, trg_lang)]
src_embeddings = np.asarray([self.teacher_embeddings[sent] for sent in src_sentences])
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
trg_embeddings = np.asarray(model.encode(trg_sentences, batch_size=self.batch_size))
mse = ((src_embeddings - trg_embeddings) ** 2).mean()
mse *= 100
mse_scores.append(mse)
logger.info("MSE evaluation on {} dataset - {}-{}:".format(self.name, src_lang, trg_lang))
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps] + mse_scores)
return -np.mean(mse_scores) # Return negative score as SentenceTransformers maximizes the performance
|
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers import SentenceTransformer
from typing import List, Optional, Tuple, Dict
import numpy as np
import logging
import os
import csv
logger = logging.getLogger(__name__)
class MSEEvaluatorFromDataFrame(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding and some target sentence embedding.
    :param dataframe: It must have the following format. Rows contain different, parallel sentences.
Columns are the respective language codes::
[{'en': 'My sentence in English', 'es': 'Oración en español', 'fr': 'Phrase en français'...},
{'en': 'My second sentence', ...}]
:param combinations: Must be of the format ``[('en', 'es'), ('en', 'fr'), ...]``.
        First entry in a tuple is the source language. The sentence in the respective language will be fetched from
        the dataframe and passed to the teacher model. The second entry in a tuple is the target language; that
        sentence will be fetched from the dataframe and passed to the student model.
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
dataframe: List[Dict[str, str]],
teacher_model: SentenceTransformer,
combinations: List[Tuple[str, str]],
batch_size: int = 8,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
self.combinations = combinations
self.name = name
self.batch_size = batch_size
if name:
name = "_" + name
self.csv_file = "mse_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps"]
self.write_csv = write_csv
self.truncate_dim = truncate_dim
self.data = {}
logger.info("Compute teacher embeddings")
all_source_sentences = set()
for src_lang, trg_lang in self.combinations:
src_sentences = []
trg_sentences = []
for row in dataframe:
if row[src_lang].strip() != "" and row[trg_lang].strip() != "":
all_source_sentences.add(row[src_lang])
src_sentences.append(row[src_lang])
trg_sentences.append(row[trg_lang])
self.data[(src_lang, trg_lang)] = (src_sentences, trg_sentences)
self.csv_headers.append("{}-{}".format(src_lang, trg_lang))
all_source_sentences = list(all_source_sentences)
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
all_src_embeddings = teacher_model.encode(all_source_sentences, batch_size=self.batch_size)
self.teacher_embeddings = {sent: emb for sent, emb in zip(all_source_sentences, all_src_embeddings)}
def __call__(self, model: SentenceTransformer, output_path: Optional[str] = None, epoch: int = -1, steps: int = -1):
model.eval()
mse_scores = []
for src_lang, trg_lang in self.combinations:
src_sentences, trg_sentences = self.data[(src_lang, trg_lang)]
src_embeddings = np.asarray([self.teacher_embeddings[sent] for sent in src_sentences])
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
trg_embeddings = np.asarray(model.encode(trg_sentences, batch_size=self.batch_size))
mse = ((src_embeddings - trg_embeddings) ** 2).mean()
mse *= 100
mse_scores.append(mse)
logger.info("MSE evaluation on {} dataset - {}-{}:".format(self.name, src_lang, trg_lang))
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps] + mse_scores)
return -np.mean(mse_scores) # Return negative score as SentenceTransformers maximizes the performance
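# Usage sketch (hypothetical model name and toy rows, for illustration only;
# a real setup would reuse the teacher/student pair from training):
#
#   teacher = SentenceTransformer("paraphrase-multilingual-mpnet-base-v2")
#   rows = [
#       {"en": "Hello world", "de": "Hallo Welt"},
#       {"en": "How are you?", "de": "Wie geht es dir?"},
#   ]
#   evaluator = MSEEvaluatorFromDataFrame(
#       dataframe=rows, teacher_model=teacher, combinations=[("en", "de")]
#   )
#   evaluator(student_model)  # negative mean MSE*100, so higher is better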
|
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomRotationTest(testing.TestCase):
@parameterized.named_parameters(
("random_rotate_neg4", -0.4),
("random_rotate_neg2", -0.2),
("random_rotate_4", 0.4),
("random_rotate_2", 0.2),
("random_rotate_tuple", (-0.2, 0.4)),
)
def test_random_rotation_shapes(self, factor):
self.run_layer_test(
layers.RandomRotation,
init_kwargs={
"factor": factor,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_rotation_correctness(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape)
self.assertAllClose(
backend.convert_to_tensor(expected_output), actual_output, atol=1e-5
)
def test_training_false(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image, training=False)
self.assertAllClose(actual_output, input_image)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape[1:])
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(expected_output, output)
|
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomRotationTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("random_rotate_neg4", -0.4),
("random_rotate_neg2", -0.2),
("random_rotate_4", 0.4),
("random_rotate_2", 0.2),
("random_rotate_tuple", (-0.2, 0.4)),
)
def test_random_rotation_shapes(self, factor):
self.run_layer_test(
layers.RandomRotation,
init_kwargs={
"factor": factor,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_rotation_correctness(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape)
self.assertAllClose(
backend.convert_to_tensor(expected_output), actual_output, atol=1e-5
)
def test_training_false(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image, training=False)
self.assertAllClose(actual_output, input_image)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape[1:])
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(expected_output, output)
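# Note: RandomRotation's factor is a fraction of 2*pi, so factor=(0.5, 0.5)
# fixes the rotation at exactly 180 degrees. That is why the expected outputs
# above are simply the 0..24 input sequence reversed, reshaped to the
# backend's image layout (NHWC for channels_last, NCHW for channels_first).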
|
import pytest
from jina import Executor, Flow, requests
from jina.constants import __default_executor__
from tests import random_docs
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(name='p1')
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 1
assert f.num_pods == 2
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, name='p1', shards=2)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 4
assert f.num_pods == 5
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_after(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_after=MyExec, name='p1', shards=2)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 4
assert f.num_pods == 5
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after_no_shard_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=MyExec, uses_before=MyExec, name='p1', shards=1
)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 1
assert f.num_pods == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_default_before_after_is_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=__default_executor__, uses_before=__default_executor__, name='p1'
)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 1
assert f.num_pods == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_before=MyExec, uses_after=MyExec, name='p1', shards=2
)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 5
assert f.num_pods == 6
|
import pytest
from jina import Executor, Flow, __default_executor__, requests
from tests import random_docs
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(name='p1')
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 1
assert f.num_pods == 2
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, name='p1', shards=2)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 4
assert f.num_pods == 5
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_after(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_after=MyExec, name='p1', shards=2)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 4
assert f.num_pods == 5
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after_no_shard_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=MyExec, uses_before=MyExec, name='p1', shards=1
)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 1
assert f.num_pods == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_default_before_after_is_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=__default_executor__, uses_before=__default_executor__, name='p1'
)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 1
assert f.num_pods == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_before=MyExec, uses_after=MyExec, name='p1', shards=2
)
with f:
f.index(docs)
assert f.num_deployments == 2
assert f._deployment_nodes['p1'].num_pods == 5
assert f.num_pods == 6
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of the shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of the shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
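# How get_version() works, in miniature: exec-ing the compiled version file
# binds __version__ into the enclosing locals(), which is then read back.
# A sketch with a placeholder source string:
#
#   src = "__version__ = '1.0.0'"
#   exec(compile(src, '<version>', 'exec'))
#   locals()['__version__']  # -> '1.0.0'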
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder,
DetrTransformerEncoder, SinePositionalEncoding)
from .detr import DETR
@MODELS.register_module()
class ConditionalDETR(DETR):
r"""Implementation of `Conditional DETR for Fast Training Convergence.
<https://arxiv.org/abs/2108.06152>`_.
Code is modified from the `official github repo
<https://github.com/Atten4Vis/ConditionalDETR>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
# NOTE: The embed_dims is typically passed from the inside out.
# For example, in DETR the embed_dims is passed as
# self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
- references (Tensor): Has shape
(bs, num_queries, 2)
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder,
DetrTransformerEncoder, SinePositionalEncoding)
from .detr import DETR
@MODELS.register_module()
class ConditionalDETR(DETR):
r"""Implementation of `Conditional DETR for Fast Training Convergence.
<https://arxiv.org/abs/2108.06152>`_.
Code is modified from the `official github repo
<https://github.com/Atten4Vis/ConditionalDETR>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
# NOTE: The embed_dims is typically passed from the inside out.
# For example, in DETR the embed_dims is passed as
# self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
- references (Tensor): Has shape
(bs, num_queries, 2)
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
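# A minimal sketch of the embed_dims/num_feats invariant asserted in
# _init_layers (the numbers are illustrative):
#
#   from mmdet.models.layers import SinePositionalEncoding
#   pe = SinePositionalEncoding(num_feats=128)
#   assert pe.num_feats * 2 == 256  # embed_dims must be 2 * num_feats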
|
"""
====================================
How to write your own TVTensor class
====================================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_tv_tensors.py>` to download the full example code.
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own TVTensor class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_transforms_plot_tv_tensors.py`.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.tv_tensors.TVTensor` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.tv_tensors.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/tv_tensors/_bounding_box.py>`_.
class MyTVTensor(tv_tensors.TVTensor):
pass
my_dp = MyTVTensor([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom TVTensor class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyTVTensor class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", tv_tensor_cls=MyTVTensor)
def hflip_my_tv_tensor(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# To understand why :func:`~torchvision.tv_tensors.wrap` is used, see
# :ref:`tv_tensor_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyTVTensor`` instance:
my_dp = MyTVTensor(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_tv_tensor(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only ``**kwargs`` should be enough.)
|
"""
=====================================
How to write your own TVTensor class
=====================================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_tv_tensors.py>` to download the full example code.
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own TVTensor class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_transforms_plot_tv_tensors.py`.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.tv_tensors.TVTensor` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.tv_tensors.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/tv_tensors/_bounding_box.py>`_.
class MyTVTensor(tv_tensors.TVTensor):
pass
my_dp = MyTVTensor([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom TVTensor class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyTVTensor class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", tv_tensor_cls=MyTVTensor)
def hflip_my_tv_tensor(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# To understand why :func:`~torchvision.tv_tensors.wrap` is used, see
# :ref:`tv_tensor_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyTVTensor`` instance:
my_dp = MyTVTensor(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_tv_tensor(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only ``**kwargs`` should be enough.)
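# %%
# A minimal sketch of such a kwargs-only kernel (the function name is
# hypothetical, and it is intentionally not registered here so it does not
# clobber the kernel registered above):
def hflip_my_tv_tensor_kwargs_only(my_dp, **kwargs):  # noqa
    out = my_dp.flip(-1)
    return tv_tensors.wrap(out, like=my_dp)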
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torchaudio
sox_ext = torchaudio._extension.lazy_import_sox_ext()
from torchaudio._internal.module_utils import dropping_support
@dropping_support
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): Seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_seed(seed)
@dropping_support
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_verbosity(verbosity)
@dropping_support
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_buffer_size(buffer_size)
@dropping_support
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_use_threads(use_threads)
@dropping_support
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(sox_ext.list_effects())
@dropping_support
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_read_formats()
@dropping_support
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_write_formats()
@dropping_support
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return sox_ext.get_buffer_size()
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torchaudio
sox_ext = torchaudio._extension.lazy_import_sox_ext()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): Seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_seed(seed)
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_verbosity(verbosity)
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_buffer_size(buffer_size)
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_use_threads(use_threads)
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(sox_ext.list_effects())
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_read_formats()
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_write_formats()
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return sox_ext.get_buffer_size()
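# Usage sketch (only meaningful when torchaudio was built with the sox
# extension; the values are illustrative):
#
#   set_verbosity(2)         # warnings and above
#   set_buffer_size(8192)    # process audio in 8 KiB buffers
#   "vol" in list_effects()  # effect names map to usage strings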
|
from typing import Dict
from jina.helper import TYPE_CHECKING, T, deprecate_by, typename
if TYPE_CHECKING: # pragma: no cover
from jina.proto import jina_pb2
class ProtoTypeMixin:
"""The base mixin class of all Jina types.
.. note::
- All Jina types should inherit from this class.
- All subclasses should have ``self._pb_body``
- All subclasses should implement ``__init__`` with the possibility of initializing from ``None``, e.g.:
.. highlight:: python
.. code-block:: python
class MyJinaType(ProtoTypeMixin):
def __init__(self, proto: Optional[jina_pb2.SomePbMsg] = None):
self._pb_body = proto or jina_pb2.SomePbMsg()
"""
def to_json(self) -> str:
"""Return the object in JSON string
:return: JSON string of the object
"""
from google.protobuf.json_format import MessageToJson
return MessageToJson(
self.proto, preserving_proto_field_name=True, sort_keys=True
)
def to_dict(self, **kwargs) -> Dict:
"""Return the object in Python dictionary.
.. note::
Array like object such as :class:`numpy.ndarray` (i.e. anything described as :class:`jina_pb2.NdArrayProto`)
will be converted to Python list.
:param kwargs: Extra kwargs to be passed to MessageToDict, like use_integers_for_enums
:return: dict representation of the object
"""
from google.protobuf.json_format import MessageToDict
return MessageToDict(self.proto, preserving_proto_field_name=True, **kwargs)
@property
def proto(self) -> 'jina_pb2._reflection.GeneratedProtocolMessageType':
"""Return the underlying Protobuf object
:return: Protobuf representation of the object
"""
return self._pb_body
def to_bytes(self) -> bytes:
"""Return the serialized the message to a string.
For more Pythonic code, please use ``bytes(...)``.
:return: binary string representation of the object
"""
return self.proto.SerializePartialToString()
def __getstate__(self):
return self._pb_body.__getstate__()
def __setstate__(self, state):
self.__init__()
self._pb_body.__setstate__(state)
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __getattr__(self, name: str):
return getattr(self._pb_body, name)
def __repr__(self):
content = str(tuple(field[0].name for field in self.proto.ListFields()))
content += f' at {id(self)}'
return f'<{typename(self)} {content.strip()}>'
def MergeFrom(self: T, other: T) -> None:
"""Merge the content of target
:param other: the document to merge from
"""
self._pb_body.MergeFrom(other._pb_body)
def CopyFrom(self: T, other: T) -> None:
"""Copy the content of target
:param other: the document to copy from
"""
self._pb_body.CopyFrom(other._pb_body)
def clear(self) -> None:
"""Remove all values from all fields of this Document."""
self._pb_body.Clear()
def pop(self, *fields) -> None:
"""Remove the values from the given fields of this Document.
:param fields: field names
"""
for k in fields:
self._pb_body.ClearField(k)
def __eq__(self, other):
if other is None:
return False
return self.proto == other.proto
def __bytes__(self):
return self.to_bytes()
dict = deprecate_by(to_dict)
json = deprecate_by(to_json)
binary_str = deprecate_by(to_bytes)
|
from typing import Dict
from jina.helper import TYPE_CHECKING, T, deprecate_by, typename
if TYPE_CHECKING: # pragma: no cover
from jina.proto import jina_pb2
class ProtoTypeMixin:
"""The base mixin class of all Jina types.
.. note::
- All Jina types should inherit from this class.
- All subclasses should have ``self._pb_body``
- All subclasses should implement ``__init__`` with the possibility of initializing from ``None``, e.g.:
.. highlight:: python
.. code-block:: python
class MyJinaType(ProtoTypeMixin):
def __init__(self, proto: Optional[jina_pb2.SomePbMsg] = None):
self._pb_body = proto or jina_pb2.SomePbMsg()
"""
def to_json(self) -> str:
"""Return the object in JSON string
:return: JSON string of the object
"""
from google.protobuf.json_format import MessageToJson
return MessageToJson(
self.proto, preserving_proto_field_name=True, sort_keys=True
)
def to_dict(self, **kwargs) -> Dict:
"""Return the object in Python dictionary.
.. note::
Array like object such as :class:`numpy.ndarray` (i.e. anything described as :class:`jina_pb2.NdArrayProto`)
will be converted to Python list.
:param kwargs: Extra kwargs to be passed to MessageToDict, like use_integers_for_enums
:return: dict representation of the object
"""
from google.protobuf.json_format import MessageToDict
return MessageToDict(self.proto, preserving_proto_field_name=True, **kwargs)
@property
def proto(self) -> 'jina_pb2._reflection.GeneratedProtocolMessageType':
"""Return the underlying Protobuf object
:return: Protobuf representation of the object
"""
return self._pb_body
def to_bytes(self) -> bytes:
"""Return the serialized the message to a string.
For more Pythonic code, please use ``bytes(...)``.
:return: binary string representation of the object
"""
return self.proto.SerializePartialToString()
def __getstate__(self):
return self._pb_body.__getstate__()
def __setstate__(self, state):
self.__init__()
self._pb_body.__setstate__(state)
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __getattr__(self, name: str):
return getattr(self._pb_body, name)
def __repr__(self):
content = str(tuple(field[0].name for field in self.proto.ListFields()))
content += f' at {id(self)}'
return f'<{typename(self)} {content.strip()}>'
def MergeFrom(self: T, other: T) -> None:
"""Merge the content of target
:param other: the document to merge from
"""
self._pb_body.MergeFrom(other._pb_body)
def CopyFrom(self: T, other: T) -> None:
"""Copy the content of target
:param other: the document to copy from
"""
self._pb_body.CopyFrom(other._pb_body)
def clear(self) -> None:
"""Remove all values from all fields of this Document."""
self._pb_body.Clear()
def pop(self, *fields) -> None:
"""Remove the values from the given fields of this Document.
:param fields: field names
"""
for k in fields:
self._pb_body.ClearField(k)
def __eq__(self, other):
if other is None:
return False
return self.proto == other.proto
def __bytes__(self):
return self.to_bytes()
dict = deprecate_by(to_dict)
json = deprecate_by(to_json)
binary_str = deprecate_by(to_bytes)
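# Usage sketch (MyJinaType is the hypothetical subclass from the class
# docstring above):
#
#   doc = MyJinaType()
#   payload = doc.to_bytes()  # protobuf wire format, same as bytes(doc)
#   as_dict = doc.to_dict()   # proto field names preserved
#   as_json = doc.to_json()   # JSON string with sorted keys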
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from langchain_anthropic import ChatAnthropic
class TestAnthropicStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
@pytest.mark.benchmark
def test_init_time_with_client(benchmark: BenchmarkFixture) -> None:
"""Test initialization time, accounting for lazy loading of client."""
def _init_in_loop_with_clients() -> None:
for _ in range(10):
llm = ChatAnthropic(model="claude-3-5-haiku-latest")
_ = llm._client
_ = llm._async_client
benchmark(_init_in_loop_with_clients)
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_anthropic import ChatAnthropic
class TestAnthropicStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import (AdaptiveAvgPool2d, FrozenBatchNorm2d,
adaptive_avg_pool2d)
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder, DDQTransformerDecoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'DDQTransformerDecoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D', 'FrozenBatchNorm2d'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D'
]
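# Usage sketch: every name in __all__ is re-exported, so downstream code can
# import from the package rather than from the private submodules, e.g.
#
#   from mmdet.models.layers import multiclass_nms, SinePositionalEncoding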
|
import torchaudio
from torchaudio_unittest import common_utils
class BackendSwitchMixin:
"""Test set/get_audio_backend works"""
backend = None
backend_module = None
def test_switch(self):
torchaudio.backend.utils.set_audio_backend(self.backend)
if self.backend is None:
assert torchaudio.backend.utils.get_audio_backend() is None
else:
assert torchaudio.backend.utils.get_audio_backend() == self.backend
assert torchaudio.load == self.backend_module.load
assert torchaudio.save == self.backend_module.save
assert torchaudio.info == self.backend_module.info
class TestBackendSwitch_NoBackend(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = None
backend_module = torchaudio.backend.no_backend
@common_utils.skipIfNoSox
class TestBackendSwitch_SoXIO(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = "sox_io"
backend_module = torchaudio.backend.sox_io_backend
@common_utils.skipIfNoModule("soundfile")
class TestBackendSwitch_soundfile(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = "soundfile"
backend_module = torchaudio.backend.soundfile_backend
|
from unittest.mock import patch
import torchaudio
from torchaudio_unittest import common_utils
class BackendSwitchMixin:
"""Test set/get_audio_backend works"""
backend = None
backend_module = None
@patch("torchaudio.backend.utils._is_backend_dispatcher_enabled", lambda: False)
def test_switch(self):
torchaudio.set_audio_backend(self.backend)
if self.backend is None:
assert torchaudio.get_audio_backend() is None
else:
assert torchaudio.get_audio_backend() == self.backend
assert torchaudio.load == self.backend_module.load
assert torchaudio.save == self.backend_module.save
assert torchaudio.info == self.backend_module.info
class TestBackendSwitch_NoBackend(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = None
backend_module = torchaudio.backend.no_backend
@common_utils.skipIfNoSox
class TestBackendSwitch_SoXIO(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = "sox_io"
backend_module = torchaudio.backend.sox_io_backend
@common_utils.skipIfNoModule("soundfile")
class TestBackendSwitch_soundfile(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = "soundfile"
backend_module = torchaudio.backend.soundfile_backend
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa: F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa: F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
|
"""Types for content blocks."""
from typing import Any, Literal, Union
from pydantic import TypeAdapter, ValidationError
from typing_extensions import NotRequired, TypedDict
class BaseDataContentBlock(TypedDict, total=False):
"""Base class for data content blocks."""
mime_type: NotRequired[str]
"""MIME type of the content block (if needed)."""
class URLContentBlock(BaseDataContentBlock):
"""Content block for data from a URL."""
type: Literal["image", "audio", "file"]
"""Type of the content block."""
source_type: Literal["url"]
"""Source type (url)."""
url: str
"""URL for data."""
class Base64ContentBlock(BaseDataContentBlock):
"""Content block for inline data from a base64 string."""
type: Literal["image", "audio", "file"]
"""Type of the content block."""
source_type: Literal["base64"]
"""Source type (base64)."""
data: str
"""Data as a base64 string."""
class PlainTextContentBlock(BaseDataContentBlock):
"""Content block for plain text data (e.g., from a document)."""
type: Literal["file"]
"""Type of the content block."""
source_type: Literal["text"]
"""Source type (text)."""
text: str
"""Text data."""
class IDContentBlock(TypedDict):
"""Content block for data specified by an identifier."""
type: Literal["image", "audio", "file"]
"""Type of the content block."""
source_type: Literal["id"]
"""Source type (id)."""
id: str
"""Identifier for data source."""
DataContentBlock = Union[
URLContentBlock,
Base64ContentBlock,
PlainTextContentBlock,
IDContentBlock,
]
_DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock)
def is_data_content_block(
content_block: dict,
) -> bool:
"""Check if the content block is a standard data content block.
Args:
content_block: The content block to check.
Returns:
True if the content block is a data content block, False otherwise.
"""
try:
_ = _DataContentBlockAdapter.validate_python(content_block)
except ValidationError:
return False
else:
return True
def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
"""Convert image content block to format expected by OpenAI Chat Completions API."""
if content_block["source_type"] == "url":
return {
"type": "image_url",
"image_url": {
"url": content_block["url"],
},
}
if content_block["source_type"] == "base64":
if "mime_type" not in content_block:
error_message = "mime_type key is required for base64 data."
raise ValueError(error_message)
mime_type = content_block["mime_type"]
return {
"type": "image_url",
"image_url": {
"url": f"data:{mime_type};base64,{content_block['data']}",
},
}
error_message = "Unsupported source type. Only 'url' and 'base64' are supported."
raise ValueError(error_message)
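# Hedged usage sketch (not part of the original module; the block values are
# illustrative): a base64 image block must carry a mime_type and converts to
# an OpenAI-style data URL.
if __name__ == "__main__":
    _block = {
        "type": "image",
        "source_type": "base64",
        "mime_type": "image/png",
        "data": "iVBORw0KGgo=",
    }
    assert is_data_content_block(_block)
    print(convert_to_openai_image_block(_block))
    # -> {'type': 'image_url', 'image_url': {'url': 'data:image/png;base64,iVBORw0KGgo='}}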
|
"""Types for content blocks."""
from typing import Any, Literal, Union
from pydantic import TypeAdapter, ValidationError
from typing_extensions import NotRequired, TypedDict
class BaseDataContentBlock(TypedDict):
"""Base class for data content blocks."""
mime_type: NotRequired[str]
"""MIME type of the content block (if needed)."""
metadata: NotRequired[dict]
"""Provider-specific metadata such as citations or filenames."""
class URLContentBlock(BaseDataContentBlock):
"""Content block for data from a URL."""
type: Literal["image", "audio", "file"]
"""Type of the content block."""
source_type: Literal["url"]
"""Source type (url)."""
url: str
"""URL for data."""
class Base64ContentBlock(BaseDataContentBlock):
"""Content block for inline data from a base64 string."""
type: Literal["image", "audio", "file"]
"""Type of the content block."""
source_type: Literal["base64"]
"""Source type (base64)."""
data: str
"""Data as a base64 string."""
class PlainTextContentBlock(BaseDataContentBlock):
"""Content block for plain text data (e.g., from a document)."""
type: Literal["file"]
"""Type of the content block."""
source_type: Literal["text"]
"""Source type (text)."""
text: str
"""Text data."""
class IDContentBlock(TypedDict):
"""Content block for data specified by an identifier."""
type: Literal["image", "audio", "file"]
"""Type of the content block."""
source_type: Literal["id"]
"""Source type (id)."""
id: str
"""Identifier for data source."""
DataContentBlock = Union[
URLContentBlock,
Base64ContentBlock,
PlainTextContentBlock,
IDContentBlock,
]
_DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock)
def is_data_content_block(
content_block: dict,
) -> bool:
"""Check if the content block is a standard data content block.
Args:
content_block: The content block to check.
Returns:
True if the content block is a data content block, False otherwise.
"""
try:
_ = _DataContentBlockAdapter.validate_python(content_block)
except ValidationError:
return False
else:
return True
def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
"""Convert image content block to format expected by OpenAI Chat Completions API."""
if content_block["source_type"] == "url":
return {
"type": "image_url",
"image_url": {
"url": content_block["url"],
},
}
if content_block["source_type"] == "base64":
if "mime_type" not in content_block:
error_message = "mime_type key is required for base64 data."
raise ValueError(error_message)
mime_type = content_block["mime_type"]
return {
"type": "image_url",
"image_url": {
"url": f"data:{mime_type};base64,{content_block['data']}",
},
}
error_message = "Unsupported source type. Only 'url' and 'base64' are supported."
raise ValueError(error_message)
|
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+sandbox://Hello/endpoint`
        :param show_progress: whether to show a progress bar
        :param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
from urllib.parse import urlparse
r = urlparse(host)
_on = r.path or '/'
_port = r.port or None
standardized_host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))
._replace(path='')
.geturl()
)
batch_size = batch_size or len(self)
_scheme = r.scheme
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=standardized_host)
with f:
return f.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
elif _scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
            standardized_host = standardized_host + (f':{_port}' if _port else '')
c = Client(host=standardized_host)
return c.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
else:
raise ValueError(f'unsupported scheme: {r.scheme}')
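# Hedged usage sketch (hosts are placeholders; this needs a reachable Flow or
# Hub sandbox, so it is shown as comments only):
#
#   da = DocumentArray.empty(10)
#   da = da.post('grpc://192.168.0.123:8080/endpoint', batch_size=5)
#   da = da.post('jinahub+sandbox://Hello/endpoint')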
|
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+sandbox://Hello/endpoint`
        :param show_progress: whether to show a progress bar
        :param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
from urllib.parse import urlparse
r = urlparse(host)
_on = r.path or '/'
_port = r.port or None
standardized_host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))
._replace(path='')
.geturl()
)
batch_size = batch_size or len(self)
_scheme = r.scheme
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=standardized_host)
with f:
return f.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
elif _scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
c = Client(host=r.hostname)
return c.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
else:
raise ValueError(f'unsupported scheme: {r.scheme}')
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .iemocap import IEMOCAP
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .musdb_hq import MUSDB_HQ
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .voxceleb1 import VoxCeleb1Identification, VoxCeleb1Verification
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"LibriLightLimited",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
"MUSDB_HQ",
"FluentSpeechCommands",
"VoxCeleb1Identification",
"VoxCeleb1Verification",
"IEMOCAP",
]
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .musdb_hq import MUSDB_HQ
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .voxceleb1 import VoxCeleb1Identification, VoxCeleb1Verification
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"LibriLightLimited",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
"MUSDB_HQ",
"FluentSpeechCommands",
"VoxCeleb1Identification",
"VoxCeleb1Verification",
]
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content},
log=str(message.content),
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"]),
)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
tool_input = _tool_input.get("__arg1", _tool_input)
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
),
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
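# Hedged usage sketch (not part of the original module; the tool name and
# arguments are illustrative):
if __name__ == "__main__":
    _msg = AIMessage(
        content="",
        tool_calls=[{"name": "search", "args": {"query": "weather"}, "id": "call_1"}],
    )
    for _action in parse_ai_message_to_tool_action(_msg):
        print(_action.tool, _action.tool_input)  # -> search {'query': 'weather'}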
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
tool_input = _tool_input.get("__arg1", _tool_input)
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.core import OptMultiConfig
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
    It is quite similar to the FCOS head, except for the searched structure of
classification branch and bbox regression branch, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of points
in multiple feature levels. Defaults to (4, 8, 16, 32, 64).
regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling.
Defaults to False.
center_sample_radius (float): Radius of center sampling.
Defaults to 1.5.
norm_on_bbox (bool): If true, normalize the regression targets with
FPN strides. Defaults to False.
centerness_on_reg (bool): If true, position centerness on the
regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
Defaults to False.
conv_bias (bool or str): If specified as `auto`, it will be decided by
the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
None, otherwise False. Defaults to "auto".
loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
loss_centerness (:obj:`ConfigDict`, or dict): Config of centerness
loss.
norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
config norm layer. Defaults to
``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
            dict], optional): Initialization config dict.
""" # noqa: E501
def __init__(self,
*args,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super().__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
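# Hedged config sketch (illustrative values in mmdet config style; not part of
# this module):
#
#   bbox_head = dict(
#       type='NASFCOSHead',
#       num_classes=80,
#       in_channels=256,
#       feat_channels=256,
#       norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))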
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
    It is quite similar to the FCOS head, except for the searched structure of
classification branch and bbox regression branch, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
strides (list[int] | list[tuple[int, int]]): Strides of points
in multiple feature levels. Default: (4, 8, 16, 32, 64).
regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling. Default: False.
center_sample_radius (float): Radius of center sampling. Default: 1.5.
norm_on_bbox (bool): If true, normalize the regression targets
with FPN strides. Default: False.
centerness_on_reg (bool): If true, position centerness on the
regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
Default: False.
conv_bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise
False. Default: "auto".
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
loss_centerness (dict): Config of centerness loss.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
init_cfg (dict or list[dict], optional): Initialization config dict.
""" # noqa: E501
def __init__(self, *args, init_cfg=None, **kwargs):
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
|
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
warnings.warn(
"The Pandas builder is deprecated and will be removed in the next major version of datasets.",
FutureWarning,
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
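# Hedged usage sketch (the path is a placeholder): this builder backs
# `load_dataset("pandas", ...)` for pickled DataFrames, e.g.
#
#   import datasets
#   ds = datasets.load_dataset("pandas", data_files="data/train.pkl")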
|
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
warnings.warn(
"The Pandas builder is deprecated and will be removed in the next major version of datasets.",
FutureWarning,
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Union[dict, tuple, list]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here.
"""
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if not scheduler.by_epoch:
scheduler.step()
if runner.param_schedulers is None:
return
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if scheduler.by_epoch:
scheduler.step()
if runner.param_schedulers is None:
return
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
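# Hedged usage sketch (mmengine enables this hook by default; the explicit
# config form would look like):
#
#   default_hooks = dict(param_scheduler=dict(type='ParamSchedulerHook'))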
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if not scheduler.by_epoch:
scheduler.step()
if runner.param_schedulers is None:
return
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if scheduler.by_epoch:
scheduler.step()
if runner.param_schedulers is None:
return
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
|
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).
Peculiarities:
- uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
a punctuation character will be treated separately.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
superclass for more information regarding methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = HerbertTokenizer
def __init__(
self,
vocab_file=None,
merges_file=None,
tokenizer_file=None,
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sep_token="</s>",
**kwargs,
):
super().__init__(
vocab_file,
merges_file,
tokenizer_file=tokenizer_file,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
sep_token=sep_token,
**kwargs,
)
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
cls = [self.cls_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
__all__ = ["HerbertTokenizerFast"]
|
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).
Peculiarities:
- uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
a punctuation character will be treated separately.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
superclass for more information regarding methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = HerbertTokenizer
def __init__(
self,
vocab_file=None,
merges_file=None,
tokenizer_file=None,
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sep_token="</s>",
**kwargs,
):
super().__init__(
vocab_file,
merges_file,
tokenizer_file=tokenizer_file,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
sep_token=sep_token,
**kwargs,
)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
cls = [self.cls_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
__all__ = ["HerbertTokenizerFast"]
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
from .xml import xml
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
"xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
".xml": ("xml", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import os
from typing import Dict
from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO
from jina.constants import (
__default_composite_gateway__,
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
from jina import __version__
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
    # None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_composite_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
return is_valid_docker_uri(uses)
except ValueError:
return False
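# Hedged illustration (the name is a placeholder); restates only the string
# transformations already performed by `to_compatible_name` above.
if __name__ == "__main__":
    assert to_compatible_name('My_Executor/rep_0') == 'my-executor-rep-0'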
|
import os
from typing import Dict
from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import (
__default_composite_gateway__,
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
__version__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
    # None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_composite_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
return is_valid_docker_uri(uses)
except ValueError:
return False
|
import asyncio
from typing import Any, AsyncGenerator, List, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self._ctx = ctx
@property
def ctx(self) -> Optional[Context]:
return self._ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if self.ctx is None:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[List[Event]]:
"""Runs the next workflow step and returns the output Event.
        If the return value is None, the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx is None:
raise ValueError("Context must be set to run a workflow step-wise!")
if not self.ctx.stepwise:
raise ValueError(
"Workflow must be created passing stepwise=True to call this method."
)
try:
# Reset the events collected in current step
self.ctx._step_events_holding = None
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
await self.ctx.shutdown()
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx.get_holding_events()
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
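# Hedged end-to-end sketch of stepwise execution driving the handler above.
# Assumes llama-index-core is installed; `MyWorkflow` is a hypothetical workflow.
import asyncio

from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step


class MyWorkflow(Workflow):
    @step
    async def only_step(self, ev: StartEvent) -> StopEvent:
        return StopEvent(result="done")


async def main() -> None:
    handler = MyWorkflow().run(stepwise=True)
    while not handler.is_done():
        events = await handler.run_step()
        for produced in events or []:
            handler.ctx.send_event(produced)
    print(handler.result())  # "done"


asyncio.run(main())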
|
import asyncio
from typing import Any, AsyncGenerator, List, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self.ctx = ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if not self.ctx:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[List[Event]]:
"""Runs the next workflow step and returns the output Event.
        If the return value is None, the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx is None:
raise ValueError("Context must be set to run a workflow step-wise!")
if not self.ctx.stepwise:
raise ValueError(
"Workflow must be created passing stepwise=True to call this method."
)
try:
# Reset the events collected in current step
self.ctx._step_events_holding = None
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
# Remove any reference to the tasks
for t in self.ctx._tasks:
t.cancel()
await asyncio.sleep(0)
# the context is no longer running
self.ctx.is_running = False
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx.get_holding_events()
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
"""Utilities for the XGBoost Dask interface."""
import logging
from typing import TYPE_CHECKING, Any, Dict
LOGGER = logging.getLogger("[xgboost.dask]")
if TYPE_CHECKING:
import distributed
def get_n_threads(local_param: Dict[str, Any], worker: "distributed.Worker") -> int:
"""Get the number of threads from a worker and the user-supplied parameters."""
# dask worker nthreads, "state" is available in 2022.6.1
dwnt = worker.state.nthreads if hasattr(worker, "state") else worker.nthreads
n_threads = None
for p in ["nthread", "n_jobs"]:
if local_param.get(p, None) is not None and local_param.get(p, dwnt) != dwnt:
LOGGER.info("Overriding `nthreads` defined in dask worker.")
n_threads = local_param[p]
break
if n_threads == 0 or n_threads is None:
n_threads = dwnt
return n_threads
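# Hedged usage sketch: resolving the thread count against a stubbed worker
# (SimpleNamespace stands in for distributed.Worker, purely for illustration).
from types import SimpleNamespace

_worker = SimpleNamespace(state=SimpleNamespace(nthreads=4))
assert get_n_threads({}, _worker) == 4              # no override: use worker threads
assert get_n_threads({'n_jobs': 2}, _worker) == 2   # explicit user override wins
assert get_n_threads({'nthread': 0}, _worker) == 4  # zero falls back to worker threads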
|
"""Utilities for the XGBoost Dask interface."""
import logging
from typing import TYPE_CHECKING, Any, Dict
LOGGER = logging.getLogger("[xgboost.dask]")
if TYPE_CHECKING:
import distributed
def get_n_threads(local_param: Dict[str, Any], worker: "distributed.Worker") -> int:
"""Get the number of threads from a worker and the user-supplied parameters."""
# dask worker nthreads, "state" is available in 2022.6.1
dwnt = worker.state.nthreads if hasattr(worker, "state") else worker.nthreads
n_threads = None
for p in ["nthread", "n_jobs"]:
if local_param.get(p, None) is not None and local_param.get(p, dwnt) != dwnt:
LOGGER.info("Overriding `nthreads` defined in dask worker.")
n_threads = local_param[p]
break
if n_threads == 0 or n_threads is None:
n_threads = dwnt
return n_threads
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
from ..registry import EVALUATORS
from .base import BaseEvaluator
from .composed_evaluator import ComposedEvaluator
def build_evaluator(
cfg: Union[dict, list]) -> Union[BaseEvaluator, ComposedEvaluator]:
"""Build function of evaluator.
When the evaluator config is a list, it will automatically build composed
evaluators.
Args:
cfg (dict | list): Config of evaluator. When the config is a list, it
will automatically build composed evaluators.
Returns:
BaseEvaluator or ComposedEvaluator: The built evaluator.
"""
if isinstance(cfg, list):
evaluators = [EVALUATORS.build(_cfg) for _cfg in cfg]
return ComposedEvaluator(evaluators=evaluators)
else:
return EVALUATORS.build(cfg)
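# Hedged usage sketch, assuming a 'ToyEvaluator' has been registered in EVALUATORS.
single = build_evaluator(dict(type='ToyEvaluator'))
composed = build_evaluator([dict(type='ToyEvaluator'), dict(type='ToyEvaluator')])
assert isinstance(composed, ComposedEvaluator)  # a list config composes the evaluators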
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from ..registry import EVALUATORS
from .base import BaseEvaluator
from .composed_evaluator import ComposedEvaluator
def build_evaluator(
cfg: Union[dict, list],
default_scope: Optional[str] = None
) -> Union[BaseEvaluator, ComposedEvaluator]:
"""Build function of evaluator.
When the evaluator config is a list, it will automatically build composed
evaluators.
Args:
cfg (dict | list): Config of evaluator. When the config is a list, it
will automatically build composed evaluators.
default_scope (str, optional): The ``default_scope`` is used to
reset the current registry. Defaults to None.
Returns:
BaseEvaluator or ComposedEvaluator: The built evaluator.
"""
if isinstance(cfg, list):
evaluators = [
EVALUATORS.build(_cfg, default_scope=default_scope) for _cfg in cfg
]
return ComposedEvaluator(evaluators=evaluators)
else:
return EVALUATORS.build(cfg, default_scope=default_scope)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; it seems to have no effect, so the EXPORT must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.8'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or the OS X El Capitan 256;
    a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
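# Hedged usage sketch for user code (kept as comments, since executing it inside this
# module would be circular): JINA_MP_START_METHOD, read near the top of this module,
# must be set before the first `import jina`; 'spawn' is shown purely for illustration.
#
#     import os
#     os.environ['JINA_MP_START_METHOD'] = 'spawn'
#     import jina  # warns: multiprocessing start method is set to `spawn`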
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; it seems to have no effect, so the EXPORT must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.7'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or the OS X El Capitan 256;
    a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow
_DIM = 10
@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
f = Flow().add(uses=uses, uses_with={'metric': 'euclidean', 'dim': _DIM})
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
with f:
f.index(da)
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['euclidean'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['euclidean'].value == 10.0
def test_save_load(tmp_path):
f = Flow().add(
name='hnsw',
uses=HnswlibSearcher,
uses_with={'metric': 'euclidean', 'dim': _DIM},
)
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
# Index and save
with f:
f.index(da)
f.post(
on='/dump',
target_peapod='hnsw',
parameters={
'dump_path': str(tmp_path),
},
)
# Sanity check - without "dump_path" specified, index is empty
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 0
assert status_ind['count_indexed'] == 0
# Load
f = Flow().add(
name='hnsw',
uses=HnswlibSearcher,
uses_with={'metric': 'euclidean', 'dim': _DIM, 'dump_path': str(tmp_path)},
)
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
# Check that we indeed have same items in index
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
print(result_search)
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['euclidean'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['euclidean'].value == 10.0
|
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow
_DIM = 10
@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
f = Flow().add(uses=uses, uses_with={'metric': 'l2', 'dim': _DIM})
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
with f:
f.index(da)
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['l2'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['l2'].value == 10.0
def test_save_load(tmp_path):
f = Flow().add(
name='hnsw', uses=HnswlibSearcher, uses_with={'metric': 'l2', 'dim': _DIM}
)
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
# Index and save
with f:
f.index(da)
f.post(
on='/dump',
target_peapod='hnsw',
parameters={
'dump_path': str(tmp_path),
},
)
# Sanity check - without "dump_path" specified, index is empty
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 0
assert status_ind['count_indexed'] == 0
# Load
f = Flow().add(
name='hnsw',
uses=HnswlibSearcher,
uses_with={'metric': 'l2', 'dim': _DIM, 'dump_path': str(tmp_path)},
)
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
# Check that we indeed have same items in index
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['l2'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['l2'].value == 10.0
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize as deserialize
from keras.src.quantizers import get as get
from keras.src.quantizers import serialize as serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize
from keras.src.quantizers.quantizers import (
compute_float8_amax_history as compute_float8_amax_history,
)
from keras.src.quantizers.quantizers import (
compute_float8_scale as compute_float8_scale,
)
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars,
)
from keras.src.quantizers.quantizers import pack_int4 as pack_int4
from keras.src.quantizers.quantizers import (
quantize_and_dequantize as quantize_and_dequantize,
)
from keras.src.quantizers.quantizers import unpack_int4 as unpack_int4
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize as deserialize
from keras.src.quantizers import get as get
from keras.src.quantizers import serialize as serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize
from keras.src.quantizers.quantizers import (
compute_float8_amax_history as compute_float8_amax_history,
)
from keras.src.quantizers.quantizers import (
compute_float8_scale as compute_float8_scale,
)
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars,
)
from keras.src.quantizers.quantizers import (
quantize_and_dequantize as quantize_and_dequantize,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker', 'SORTTracker']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker
__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker']
|
_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256,
stride=2,
num_outs=5))
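# Hedged usage sketch (kept as comments so the config itself is unchanged): the merged
# result can be inspected with mmengine; the file path below is hypothetical.
#
#     from mmengine.config import Config
#     cfg = Config.fromfile('configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py')
#     print(cfg.model.backbone.type)  # 'HRNet'
#     print(cfg.model.neck.type)      # 'HRFPN'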
|
_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256,
stride=2,
num_outs=5))
img_norm_cfg = dict(
mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMDetection provides 17 registry nodes to support using modules across
projects. Each node is a child of the root registry in MMEngine.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS
from mmengine.registry import DATASETS as MMENGINE_DATASETS
from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR
from mmengine.registry import HOOKS as MMENGINE_HOOKS
from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS
from mmengine.registry import LOOPS as MMENGINE_LOOPS
from mmengine.registry import METRICS as MMENGINE_METRICS
from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS
from mmengine.registry import MODELS as MMENGINE_MODELS
from mmengine.registry import \
OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS
from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS
from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS
from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS
from mmengine.registry import \
RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS
from mmengine.registry import RUNNERS as MMENGINE_RUNNERS
from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS
from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS
from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS
from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS
from mmengine.registry import \
WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS
from mmengine.registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry(
'runner', parent=MMENGINE_RUNNERS, locations=['mmdet.engine.runner'])
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry(
'runner constructor',
parent=MMENGINE_RUNNER_CONSTRUCTORS,
locations=['mmdet.engine.runner'])
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry(
'loop', parent=MMENGINE_LOOPS, locations=['mmdet.engine.runner'])
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry(
'hook', parent=MMENGINE_HOOKS, locations=['mmdet.engine.hooks'])
# manage data-related modules
DATASETS = Registry(
'dataset', parent=MMENGINE_DATASETS, locations=['mmdet.datasets'])
DATA_SAMPLERS = Registry(
'data sampler',
parent=MMENGINE_DATA_SAMPLERS,
locations=['mmdet.datasets.samplers'])
TRANSFORMS = Registry(
'transform',
parent=MMENGINE_TRANSFORMS,
locations=['mmdet.datasets.transforms'])
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry(
'model_wrapper',
parent=MMENGINE_MODEL_WRAPPERS,
locations=['mmdet.models'])
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry(
'weight initializer',
parent=MMENGINE_WEIGHT_INITIALIZERS,
locations=['mmdet.models'])
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry(
'optimizer',
parent=MMENGINE_OPTIMIZERS,
locations=['mmdet.engine.optimizers'])
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry(
'optim_wrapper',
parent=MMENGINE_OPTIM_WRAPPERS,
locations=['mmdet.engine.optimizers'])
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry(
'optimizer constructor',
parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS,
locations=['mmdet.engine.optimizers'])
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler',
parent=MMENGINE_PARAM_SCHEDULERS,
locations=['mmdet.engine.schedulers'])
# manage all kinds of metrics
METRICS = Registry(
'metric', parent=MMENGINE_METRICS, locations=['mmdet.evaluation'])
# manage evaluator
EVALUATOR = Registry(
'evaluator', parent=MMENGINE_EVALUATOR, locations=['mmdet.evaluation'])
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry(
'task util', parent=MMENGINE_TASK_UTILS, locations=['mmdet.models'])
# manage visualizer
VISUALIZERS = Registry(
'visualizer',
parent=MMENGINE_VISUALIZERS,
locations=['mmdet.visualization'])
# manage visualizer backend
VISBACKENDS = Registry(
'vis_backend',
parent=MMENGINE_VISBACKENDS,
locations=['mmdet.visualization'])
# manage logprocessor
LOG_PROCESSORS = Registry(
'log_processor',
parent=MMENGINE_LOG_PROCESSORS,
# TODO: update the location when mmdet has its own log processor
locations=['mmdet.engine'])
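# Hedged usage sketch: registering a hypothetical module and building it through
# the MODELS registry defined above (torch is assumed to be installed).
import torch.nn as nn


@MODELS.register_module()
class ToyDetector(nn.Module):  # hypothetical, for illustration only
    def __init__(self, num_classes: int = 80) -> None:
        super().__init__()
        self.num_classes = num_classes


toy = MODELS.build(dict(type='ToyDetector', num_classes=20))
assert toy.num_classes == 20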
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMDetection provides 17 registry nodes to support using modules across
projects. Each node is a child of the root registry in MMEngine.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS
from mmengine.registry import DATASETS as MMENGINE_DATASETS
from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR
from mmengine.registry import HOOKS as MMENGINE_HOOKS
from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS
from mmengine.registry import LOOPS as MMENGINE_LOOPS
from mmengine.registry import METRICS as MMENGINE_METRICS
from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS
from mmengine.registry import MODELS as MMENGINE_MODELS
from mmengine.registry import \
OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS
from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS
from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS
from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS
from mmengine.registry import \
RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS
from mmengine.registry import RUNNERS as MMENGINE_RUNNERS
from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS
from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS
from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS
from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS
from mmengine.registry import \
WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS
from mmengine.registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', parent=MMENGINE_RUNNERS)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry(
'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS)
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop', parent=MMENGINE_LOOPS)
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook', parent=MMENGINE_HOOKS)
# manage data-related modules
DATASETS = Registry('dataset', parent=MMENGINE_DATASETS)
DATA_SAMPLERS = Registry('data sampler', parent=MMENGINE_DATA_SAMPLERS)
TRANSFORMS = Registry('transform', parent=MMENGINE_TRANSFORMS)
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', parent=MMENGINE_MODELS)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper', parent=MMENGINE_MODEL_WRAPPERS)
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry(
'weight initializer', parent=MMENGINE_WEIGHT_INITIALIZERS)
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer', parent=MMENGINE_OPTIMIZERS)
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper', parent=MMENGINE_OPTIM_WRAPPERS)
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry(
'optimizer constructor', parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS)
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', parent=MMENGINE_PARAM_SCHEDULERS)
# manage all kinds of metrics
METRICS = Registry('metric', parent=MMENGINE_METRICS)
# manage evaluator
EVALUATOR = Registry('evaluator', parent=MMENGINE_EVALUATOR)
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util', parent=MMENGINE_TASK_UTILS)
# manage visualizer
VISUALIZERS = Registry('visualizer', parent=MMENGINE_VISUALIZERS)
# manage visualizer backend
VISBACKENDS = Registry('vis_backend', parent=MMENGINE_VISBACKENDS)
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor', parent=MMENGINE_LOG_PROCESSORS)
|
"""
Gcs file and directory reader.
A loader that fetches a file or iterates through a directory on Gcs.
"""
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.opendal.base import OpendalReader
class OpendalGcsReader(BaseReader):
"""General reader for any Gcs file or directory."""
def __init__(
self,
bucket: str,
path: str = "/",
endpoint: str = "",
credentials: str = "",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
) -> None:
"""
Initialize Gcs container, along with credentials if needed.
        If path is not set, the entire bucket (filtered by prefix) is parsed.
Args:
bucket (str): the name of your gcs bucket
path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If path ends with `/`, this loader will iterate through the entire directory. Otherwise, this loader will load the file.
            endpoint (Optional[str]): the endpoint of the GCS service.
credentials (Optional[str]): provide credential string for GCS OAuth2 directly.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor
# opendal service related config.
self.options = {
"bucket": bucket,
"endpoint": endpoint,
"credentials": credentials,
}
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
loader = OpendalReader(
scheme="gcs",
path=self.path,
file_extractor=self.file_extractor,
**self.options,
)
return loader.load_data()
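# Hedged usage sketch, assuming a real bucket and a base64 service-account credential;
# load_data() performs network calls through opendal.
reader = OpendalGcsReader(
    bucket='my-bucket',                        # hypothetical bucket name
    path='docs/',                              # trailing '/' -> iterate the directory
    credentials='<base64-service-account-json>',
)
documents = reader.load_data()
print(len(documents))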
|
"""Gcs file and directory reader.
A loader that fetches a file or iterates through a directory on Gcs.
"""
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.opendal.base import OpendalReader
class OpendalGcsReader(BaseReader):
"""General reader for any Gcs file or directory."""
def __init__(
self,
bucket: str,
path: str = "/",
endpoint: str = "",
credentials: str = "",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
) -> None:
"""Initialize Gcs container, along with credentials if needed.
        If path is not set, the entire bucket (filtered by prefix) is parsed.
Args:
bucket (str): the name of your gcs bucket
path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If path ends with `/`, this loader will iterate through the entire directory. Otherwise, this loader will load the file.
            endpoint (Optional[str]): the endpoint of the GCS service.
credentials (Optional[str]): provide credential string for GCS OAuth2 directly.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor
# opendal service related config.
self.options = {
"bucket": bucket,
"endpoint": endpoint,
"credentials": credentials,
}
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
loader = OpendalReader(
scheme="gcs",
path=self.path,
file_extractor=self.file_extractor,
**self.options,
)
return loader.load_data()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import nightly, require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@nightly
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_flax(self):
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2",
variant="bf16",
dtype=jnp.bfloat16,
)
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@nightly
@require_flax
class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_dpm_flax(self):
model_id = "stabilityai/stable-diffusion-2"
scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
model_id,
scheduler=scheduler,
variant="bf16",
dtype=jnp.bfloat16,
)
params["scheduler"] = scheduler_params
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import nightly, require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@nightly
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_flax(self):
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2",
variant="bf16",
dtype=jnp.bfloat16,
)
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@nightly
@require_flax
class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_dpm_flax(self):
model_id = "stabilityai/stable-diffusion-2"
scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
model_id,
scheduler=scheduler,
variant="bf16",
dtype=jnp.bfloat16,
)
params["scheduler"] = scheduler_params
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
"""
This file is part of the private API. Please do not use these classes directly, as they will be modified in
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import List, Optional, Tuple, Union
import PIL.Image
import torch
from torch import Tensor
from torchvision.transforms.v2 import functional as F, InterpolationMode
from torchvision.transforms.v2.functional._geometry import _check_interpolation
__all__ = ["StereoMatching"]
class StereoMatching(torch.nn.Module):
def __init__(
self,
*,
use_gray_scale: bool = False,
resize_size: Optional[Tuple[int, ...]],
mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
std: Tuple[float, ...] = (0.5, 0.5, 0.5),
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
# pacify mypy
self.resize_size: Union[None, List]
if resize_size is not None:
self.resize_size = list(resize_size)
else:
self.resize_size = None
self.mean = list(mean)
self.std = list(std)
self.interpolation = _check_interpolation(interpolation)
self.use_gray_scale = use_gray_scale
def forward(self, left_image: Tensor, right_image: Tensor) -> Tuple[Tensor, Tensor]:
def _process_image(img: PIL.Image.Image) -> Tensor:
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
if self.resize_size is not None:
# We hard-code antialias=False to preserve results after we changed
# its default from None to True (see
# https://github.com/pytorch/vision/pull/7160)
# TODO: we could re-train the stereo models with antialias=True?
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=False)
if self.use_gray_scale is True:
img = F.rgb_to_grayscale(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
img = img.contiguous()
return img
left_image = _process_image(left_image)
right_image = _process_image(right_image)
return left_image, right_image
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
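# Hedged usage sketch: preprocessing a stereo pair with the transform above
# (the sizes are arbitrary; torch is assumed to be installed).
transform = StereoMatching(resize_size=(384, 768))
left = torch.randint(0, 256, (3, 400, 880), dtype=torch.uint8)
right = torch.randint(0, 256, (3, 400, 880), dtype=torch.uint8)
left_t, right_t = transform(left, right)
print(left_t.shape)  # torch.Size([3, 384, 768])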
|
"""
This file is part of the private API. Please do not use these classes directly, as they will be modified in
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import List, Optional, Tuple, Union
import PIL.Image
import torch
from torch import Tensor
from torchvision.prototype.transforms.functional._geometry import _check_interpolation
from . import functional as F, InterpolationMode
__all__ = ["StereoMatching"]
class StereoMatching(torch.nn.Module):
def __init__(
self,
*,
use_gray_scale: bool = False,
resize_size: Optional[Tuple[int, ...]],
mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
std: Tuple[float, ...] = (0.5, 0.5, 0.5),
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
# pacify mypy
self.resize_size: Union[None, List]
if resize_size is not None:
self.resize_size = list(resize_size)
else:
self.resize_size = None
self.mean = list(mean)
self.std = list(std)
self.interpolation = _check_interpolation(interpolation)
self.use_gray_scale = use_gray_scale
def forward(self, left_image: Tensor, right_image: Tensor) -> Tuple[Tensor, Tensor]:
def _process_image(img: PIL.Image.Image) -> Tensor:
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
if self.resize_size is not None:
# We hard-code antialias=False to preserve results after we changed
# its default from None to True (see
# https://github.com/pytorch/vision/pull/7160)
# TODO: we could re-train the stereo models with antialias=True?
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=False)
if self.use_gray_scale is True:
img = F.rgb_to_grayscale(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
img = img.contiguous()
return img
left_image = _process_image(left_image)
right_image = _process_image(right_image)
return left_image, right_image
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.image.image_jax_array import ImageJaxArray
from docarray.typing.tensor.jaxarray import JaxArray
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="ImageTensor")
class ImageTensor(AnyTensor, AbstractImageTensor):
"""
    Represents an image tensor object that can be used with TensorFlow, PyTorch, and NumPy types.
---
'''python
from docarray import BaseDoc
from docarray.typing import ImageTensor
class MyImageDoc(BaseDoc):
image: ImageTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyImageDoc(image=tf.zeros((1000, 2)))
type(doc.image) # ImageTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyImageDoc(image=torch.zeros((1000, 2)))
type(doc.image) # ImageTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyImageDoc(image=np.zeros((1000, 2)))
type(doc.image) # ImageNdArray
'''
---
Returns:
Union[ImageTorchTensor, ImageTensorFlowTensor, ImageNdArray]: The validated and converted image tensor.
Raises:
TypeError: If the input type is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray].
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(ImageTorchTensor, value)
elif isinstance(value, torch.Tensor):
return ImageTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(ImageTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return ImageTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(ImageJaxArray, value)
elif isinstance(value, jnp.ndarray):
return ImageJaxArray._docarray_from_native(value) # noqa
try:
return ImageNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="ImageTensor")
class ImageTensor(AnyTensor, AbstractImageTensor):
"""
    Represents an image tensor object that can be used with TensorFlow, PyTorch, and NumPy types.
---
'''python
from docarray import BaseDoc
from docarray.typing import ImageTensor
class MyImageDoc(BaseDoc):
image: ImageTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyImageDoc(image=tf.zeros((1000, 2)))
type(doc.image) # ImageTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyImageDoc(image=torch.zeros((1000, 2)))
type(doc.image) # ImageTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyImageDoc(image=np.zeros((1000, 2)))
type(doc.image) # ImageNdArray
'''
---
Returns:
Union[ImageTorchTensor, ImageTensorFlowTensor, ImageNdArray]: The validated and converted image tensor.
Raises:
TypeError: If the input type is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray].
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(ImageTorchTensor, value)
elif isinstance(value, torch.Tensor):
return ImageTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(ImageTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return ImageTensorFlowTensor._docarray_from_native(value) # noqa
try:
return ImageNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
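The row above removes the JAX branch from the validator while leaving the torch/TensorFlow dispatch and the NumPy fallback unchanged. As a minimal sketch of the fallback path (assuming neither torch nor tensorflow is installed, so both availability guards are skipped):

import numpy as np
from docarray import BaseDoc
from docarray.typing import ImageTensor

class MyImageDoc(BaseDoc):
    image: ImageTensor

# Validation falls through to ImageNdArray.validate, which wraps the raw array.
doc = MyImageDoc(image=np.zeros((64, 64, 3)))
print(type(doc.image))  # ImageNdArray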
"""Module for parsing text files.."""
from typing import Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
class TextParser(BaseBlobParser):
"""Parser for text blobs."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
|
"""Module for parsing text files.."""
from typing import Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
class TextParser(BaseBlobParser):
"""Parser for text blobs."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
yield Document(page_content=blob.as_string(), metadata={"source": blob.source}) # type: ignore[attr-defined]
|
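A minimal sketch of driving the parser above; Blob.from_data is the standard way to wrap in-memory text, and the sample string and path are illustrative:

from langchain_community.document_loaders.blob_loaders import Blob

parser = TextParser()
blob = Blob.from_data("hello world", path="example.txt")
# lazy_parse yields a single Document carrying the blob's text and source.
for doc in parser.lazy_parse(blob):
    print(doc.page_content, doc.metadata)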
from docarray.index.backends.elastic import ElasticV7DocIndex
from docarray.index.backends.hnswlib import HnswDocumentIndex
__all__ = ['HnswDocumentIndex', 'ElasticV7DocIndex']
|
from docarray.index.backends.hnswlib import HnswDocumentIndex
__all__ = ['HnswDocumentIndex']
|
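The row above narrows the package's public exports to the HNSW backend. A minimal sketch of that backend in use (the schema, dimensionality, and work_dir are illustrative):

import numpy as np
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray

class MyDoc(BaseDoc):
    embedding: NdArray[128]

# Build an on-disk HNSW index, add documents, and run a nearest-neighbor query.
index = HnswDocumentIndex[MyDoc](work_dir="./hnsw_index")
index.index([MyDoc(embedding=np.random.rand(128)) for _ in range(10)])
matches, scores = index.find(np.random.rand(128), search_field="embedding", limit=3)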
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
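The only change in the row above is dropping the logging setup; if timestamped progress logs are still wanted, the removed configuration can be applied at the call site before running the evaluator:

import logging

logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)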
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="ImageTensor")
class ImageTensor(AnyTensor, AbstractImageTensor):
"""
Represents an image tensor object that can be backed by TensorFlow, PyTorch, or NumPy.
---
'''python
from docarray import BaseDoc
from docarray.typing import ImageTensor
class MyImageDoc(BaseDoc):
image: ImageTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyImageDoc(image=tf.zeros((1000, 2)))
type(doc.image) # ImageTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyImageDoc(image=torch.zeros((1000, 2)))
type(doc.image) # ImageTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyImageDoc(image=np.zeros((1000, 2)))
type(doc.image) # ImageNdArray
'''
---
Returns:
Union[ImageTorchTensor, ImageTensorFlowTensor, ImageNdArray]: The validated and converted image tensor.
Raises:
TypeError: If the input type is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray].
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(ImageTorchTensor, value)
elif isinstance(value, torch.Tensor):
return ImageTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(ImageTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return ImageTensorFlowTensor._docarray_from_native(value) # noqa
try:
return ImageNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import Union
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor as ImageTFTensor,
)
ImageTensor = Union[ImageNdArray] # type: ignore
if tf_available and torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor, ImageTFTensor] # type: ignore
elif tf_available:
ImageTensor = Union[ImageNdArray, ImageTFTensor] # type: ignore
elif torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor] # type: ignore
|
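The refactor above replaces the validator class with a plain typing.Union whose members depend on which frameworks import successfully; pydantic then tries each variant's own validator. A short sketch under the assumption that PyTorch is installed, mirroring the class version's docstring:

import torch
from docarray import BaseDoc
from docarray.typing import ImageTensor

class MyImageDoc(BaseDoc):
    image: ImageTensor

# The torch variant's validator claims the raw tensor.
doc = MyImageDoc(image=torch.zeros(64, 64, 3))
print(type(doc.image))  # ImageTorchTensor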
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
# fmt: off
raise RuntimeError(
f"Unexpected environment variable value `{var}={val}`. "
f"Expected one of {trues + falses}")
# fmt: on
return False
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
remove (bool): If enabled, append future removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
|
import importlib.util
import warnings
from functools import wraps
from typing import Optional
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
remove (bool): If enabled, append future removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
|
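A short sketch of how the decorators above are typically applied; the module and function names here are illustrative:

@requires_module("soundfile")
def load_with_soundfile(path):
    import soundfile as sf
    return sf.read(path)

@deprecated("Please use `new_resample` instead.", version="2.1", remove=True)
def old_resample(waveform):
    return waveform

# Without soundfile installed, calling load_with_soundfile raises RuntimeError;
# calling old_resample emits a deprecation warning before delegating.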