input (string, 33-5k characters) | output (string, 32-5k characters)
---|---
import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from tgi.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.0.1"):
return True
else:
        raise ValueError(
            "'text-generation-inference' version "
            f"incompatible with function call: {tgi_version}. "
            "Function call support was added in v2.0.1"
        )
def get_max_input_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.1.0"):
return model_info.get("max_input_tokens", None)
else:
return model_info.get("max_input_length", None)
def get_max_total_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("max_total_tokens", None)
def get_model_name(url: str) -> Union[str, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("model_id", None)
def to_tgi_messages(messages: Sequence[ChatMessage]) -> Sequence[Message]:
out_messages = []
for m in messages:
tool_calls = m.additional_kwargs.get("tool_calls")
out_messages.append(
Message(role=m.role.value, content=m.content, tool_calls=tool_calls)
)
return out_messages
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
def resolve_tool_choice(
tools: Optional[List[dict]] = None, tool_choice: str = "none"
) -> Union[str, dict]:
"""Resolve tool choice.
Check if tool_name exists in tools.
    Note that unlike the OpenAI specification, 'auto' will ALWAYS choose the tool for you.
    Set to 'none' explicitly if you do not wish to use a tool.
"""
valid_tool_choices = ["none", "auto"] + [t["function"]["name"] for t in tools or []]
if tool_choice not in valid_tool_choices:
raise ValueError(
f"{tool_choice} is not a valid tool_choice. Must be one of {valid_tool_choices}"
)
return tool_choice
|
import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from text_generation.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.0.1"):
return True
else:
        raise ValueError(
            "'text-generation-inference' version "
            f"incompatible with function call: {tgi_version}. "
            "Function call support was added in v2.0.1"
        )
def get_max_input_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.1.0"):
return model_info.get("max_input_tokens", None)
else:
return model_info.get("max_input_length", None)
def get_max_total_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("max_total_tokens", None)
def get_model_name(url: str) -> Union[str, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("model_id", None)
def to_tgi_messages(messages: Sequence[ChatMessage]) -> Sequence[Message]:
out_messages = []
for m in messages:
tool_calls = m.additional_kwargs.get("tool_calls")
out_messages.append(
Message(role=m.role.value, content=m.content, tool_calls=tool_calls)
)
return out_messages
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
def resolve_tool_choice(
tools: Optional[List[dict]] = None, tool_choice: str = "none"
) -> Union[str, dict]:
"""Resolve tool choice.
Check if tool_name exists in tools.
    Note that unlike the OpenAI specification, 'auto' will ALWAYS choose the tool for you.
    Set to 'none' explicitly if you do not wish to use a tool.
"""
valid_tool_choices = ["none", "auto"] + [t["function"]["name"] for t in tools or []]
if tool_choice not in valid_tool_choices:
raise ValueError(
f"{tool_choice} is not a valid tool_choice. Must be one of {valid_tool_choices}"
)
return tool_choice
|
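A minimal usage sketch for the tool-choice helpers above; the `tools` list is hypothetical and the functions are assumed to be importable from this module.

# Illustrative sketch only: exercising resolve_tool_choice with a hypothetical tools list.
tools = [{"function": {"name": "get_weather", "parameters": {"type": "object"}}}]

print(resolve_tool_choice(tools=tools, tool_choice="auto"))         # "auto"
print(resolve_tool_choice(tools=tools, tool_choice="get_weather"))  # "get_weather"

try:
    resolve_tool_choice(tools=tools, tool_choice="unknown_tool")
except ValueError as err:
    print(err)  # names the valid choices: none, auto, get_weather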
_INITIALIZED = False
_LAZILY_IMPORTED = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
try:
from . import _ctc_decoder
except AttributeError as err:
raise RuntimeError(
"CTC decoder requires the decoder extension. Please set BUILD_CTC_DECODER=1 when building from source."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"CTCHypothesis",
"CTCDecoder",
"ctc_decoder",
"download_pretrained_files",
]
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
try:
from . import _ctc_decoder
except AttributeError as err:
raise RuntimeError(
"CTC decoder requires the decoder extension. Please set BUILD_CTC_DECODER=1 when building from source."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
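The pair above relies on module-level `__getattr__` (PEP 562) to defer an expensive import; a self-contained sketch of the same pattern, with a hypothetical `_heavy_module` submodule, is:

# Generic lazy-import sketch (PEP 562); `_heavy_module` is a hypothetical submodule.
from importlib import import_module

_LAZILY_IMPORTED = ["expensive_function"]

def __getattr__(name: str):
    if name in _LAZILY_IMPORTED:
        module = import_module("._heavy_module", __name__)  # deferred until first access
        item = getattr(module, name)
        globals()[name] = item  # cache so later lookups bypass __getattr__
        return item
    raise AttributeError(f"module {__name__} has no attribute {name}")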
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
    Normalize a batch of inputs so that each input in the batch has an L2 norm
    equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
def build(self, input_shape):
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
    Normalize a batch of inputs so that each input in the batch has an L2 norm
    equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> print(np.sum(normalized_data[0, :] ** 2))
0.99999994
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
def build(self, input_shape):
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
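For reference, the same math as the `call` method above can be checked with plain NumPy:

# NumPy-only check of the unit-normalization math used in call() above.
import numpy as np

x = np.arange(6, dtype="float32").reshape(2, 3)
square_sum = np.sum(np.square(x), axis=-1, keepdims=True)
x_unit = x / np.sqrt(np.maximum(square_sum, 1e-12))
print(np.sum(x_unit[0] ** 2))  # ~1.0, up to float32 rounding (cf. the two docstring outputs)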
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url
from .vision import VisionDataset
class SEMEION(VisionDataset):
r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.
Args:
        root (str or ``pathlib.Path``): Root directory of the dataset where the file
            ``semeion.data`` exists or will be saved.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
filename = "semeion.data"
md5_checksum = "cb545d371d2ce14ec121470795a77432"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
fp = os.path.join(self.root, self.filename)
data = np.loadtxt(fp)
# convert value to 8 bit unsigned integer
# color (white #255) the pixels
self.data = (data[:, :256] * 255).astype("uint8")
self.data = np.reshape(self.data, (-1, 16, 16))
self.labels = np.nonzero(data[:, 256:])[1]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
root = self.root
download_url(self.url, root, self.filename, self.md5_checksum)
|
import os.path
from typing import Any, Callable, Optional, Tuple
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url
from .vision import VisionDataset
class SEMEION(VisionDataset):
r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.
Args:
        root (string): Root directory of the dataset where the file
            ``semeion.data`` exists or will be saved.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
filename = "semeion.data"
md5_checksum = "cb545d371d2ce14ec121470795a77432"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
fp = os.path.join(self.root, self.filename)
data = np.loadtxt(fp)
# convert value to 8 bit unsigned integer
# color (white #255) the pixels
self.data = (data[:, :256] * 255).astype("uint8")
self.data = np.reshape(self.data, (-1, 16, 16))
self.labels = np.nonzero(data[:, 256:])[1]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
root = self.root
download_url(self.url, root, self.filename, self.md5_checksum)
|
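A hedged usage sketch for the dataset class above, assuming torchvision is installed and the UCI mirror referenced in `url` is reachable:

# Usage sketch only; downloads semeion.data into ./data on first run.
from torchvision import transforms
from torchvision.datasets import SEMEION

dataset = SEMEION(root="./data", transform=transforms.ToTensor(), download=True)
img, target = dataset[0]
print(len(dataset), img.shape, target)  # sample count, 1x16x16 tensor, class index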
import numpy as np
import scipy.signal
from keras.src import backend
from keras.src import initializers
from keras.src import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.zeros(shape=shape))
self.run_class_serialization_test(initializer)
def test_ones_initializer(self):
shape = (3, 3)
initializer = initializers.Ones()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.ones(shape=shape))
self.run_class_serialization_test(initializer)
def test_constant_initializer(self):
shape = (3, 3)
constant_value = 6.0
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_constant_initializer_array_value(self):
shape = (3, 3)
constant_value = np.random.random((3, 3))
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_identity_initializer(self):
shape = (3, 3)
gain = 2
initializer = initializers.Identity(gain=gain)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.eye(*shape) * gain)
self.run_class_serialization_test(initializer)
def test_stft_initializer(self):
shape = (256, 1, 513)
time_range = np.arange(256).reshape((-1, 1, 1))
freq_range = (np.arange(513) / 1024.0).reshape((1, 1, -1))
pi = np.arccos(np.float64(-1))
args = -2 * pi * time_range * freq_range
tol_kwargs = {}
if backend.backend() == "jax":
# TODO(mostafa-mahmoud): investigate the cases
# of non-small error in jax and torch
tol_kwargs = {"atol": 1e-4, "rtol": 1e-6}
initializer = initializers.STFTInitializer("real", None)
values = backend.convert_to_numpy(initializer(shape))
self.assertAllClose(np.cos(args), values, atol=1e-4)
self.run_class_serialization_test(initializer)
initializer = initializers.STFTInitializer(
"real",
"hamming",
None,
True,
)
window = scipy.signal.windows.get_window("hamming", 256, True)
window = window.astype("float64").reshape((-1, 1, 1))
values = backend.convert_to_numpy(initializer(shape, "float64"))
self.assertAllClose(np.cos(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
initializer = initializers.STFTInitializer(
"imag",
"tukey",
"density",
False,
)
window = scipy.signal.windows.get_window("tukey", 256, False)
window = window.astype("float64").reshape((-1, 1, 1))
window = window / np.sqrt(np.sum(window**2))
values = backend.convert_to_numpy(initializer(shape, "float64"))
self.assertAllClose(np.sin(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
initializer = initializers.STFTInitializer(
"imag",
list(range(1, 257)),
"spectrum",
)
window = np.arange(1, 257)
window = window.astype("float64").reshape((-1, 1, 1))
window = window / np.sum(window)
values = backend.convert_to_numpy(initializer(shape, "float64"))
self.assertAllClose(np.sin(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
with self.assertRaises(ValueError):
initializers.STFTInitializer("imaginary")
with self.assertRaises(ValueError):
initializers.STFTInitializer("real", scaling="l2")
with self.assertRaises(ValueError):
initializers.STFTInitializer("real", window="unknown")
|
import numpy as np
from keras.src import backend
from keras.src import initializers
from keras.src import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.zeros(shape=shape))
self.run_class_serialization_test(initializer)
def test_ones_initializer(self):
shape = (3, 3)
initializer = initializers.Ones()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.ones(shape=shape))
self.run_class_serialization_test(initializer)
def test_constant_initializer(self):
shape = (3, 3)
constant_value = 6.0
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_constant_initializer_array_value(self):
shape = (3, 3)
constant_value = np.random.random((3, 3))
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_identity_initializer(self):
shape = (3, 3)
gain = 2
initializer = initializers.Identity(gain=gain)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.eye(*shape) * gain)
self.run_class_serialization_test(initializer)
|
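The tests above call initializers directly; a standalone sketch of that pattern (Keras 3 assumed) is:

# Standalone sketch of calling a Keras initializer directly, as the tests do.
from keras import initializers, ops

init = initializers.Constant(value=6.0)
values = init(shape=(3, 3))
print(ops.convert_to_numpy(values))  # 3x3 array filled with 6.0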
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Optional
from langchain_core.callbacks import StreamingStdOutCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
"""Callback handler for streaming in agents.
Only works with agents using LLMs that support streaming.
Only the final output of the agent will be streamed.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
            strip_tokens: Whether to ignore white space and newlines when comparing
                answer_prefix_tokens to the last tokens (to determine whether the
                answer has been reached).
            stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
**kwargs: Any,
) -> None:
"""Run when LLM starts running."""
self.answer_reached = False
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
sys.stdout.write(t)
sys.stdout.flush()
return
# ... if yes, then print tokens from now on
if self.answer_reached:
sys.stdout.write(token)
sys.stdout.flush()
|
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Optional
from langchain_core.callbacks import StreamingStdOutCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
"""Callback handler for streaming in agents.
Only works with agents using LLMs that support streaming.
Only the final output of the agent will be streamed.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
            strip_tokens: Whether to ignore white space and newlines when comparing
                answer_prefix_tokens to the last tokens (to determine whether the
                answer has been reached).
            stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
"""Run when LLM starts running."""
self.answer_reached = False
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
sys.stdout.write(t)
sys.stdout.flush()
return
# ... if yes, then print tokens from now on
if self.answer_reached:
sys.stdout.write(token)
sys.stdout.flush()
|
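A hedged sketch of wiring the handler into a streaming LLM; `ChatOpenAI` is used only as a stand-in for any LangChain chat model that supports streaming:

# Illustrative only: ChatOpenAI stands in for any streaming-capable LLM.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    streaming=True,
    callbacks=[FinalStreamingStdOutCallbackHandler()],
    temperature=0,
)
# Inside an agent, only tokens that follow the "Final Answer:" prefix reach stdout.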
import time
from jina import Flow
from tests.integration.instrumentation import (
get_exported_jobs,
get_flow_metric_labels,
get_services,
)
def test_docker_instrumentation(
jaeger_port,
otlp_collector,
otlp_receiver_port,
docker_image_name,
docker_image_built,
prometheus_client,
expected_flow_metric_labels,
):
f = Flow(
tracing=True,
traces_exporter_host='http://localhost',
traces_exporter_port=otlp_receiver_port,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=otlp_receiver_port,
).add(uses=f'docker://{docker_image_name}')
with f:
from docarray import DocumentArray
f.post(f'/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(3)
services = get_services(jaeger_port)
assert set(services) == {'executor0/rep-0', 'gateway/rep-0'}
exported_jobs = get_exported_jobs(prometheus_client)
assert exported_jobs == {
'gateway/rep-0',
'executor0/rep-0',
}
flow_metric_labels = get_flow_metric_labels(prometheus_client)
assert flow_metric_labels.issubset(expected_flow_metric_labels)
|
import os
import time
import pytest
from jina import Flow
from tests.integration.instrumentation import (
get_exported_jobs,
get_flow_metric_labels,
get_services,
)
def test_docker_instrumentation(
jaeger_port,
otlp_collector,
otlp_receiver_port,
docker_image_name,
docker_image_built,
prometheus_client,
expected_flow_metric_labels,
):
f = Flow(
tracing=True,
traces_exporter_host='http://localhost',
traces_exporter_port=otlp_receiver_port,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=otlp_receiver_port,
).add(uses=f'docker://{docker_image_name}')
with f:
from jina import DocumentArray
f.post(f'/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(3)
services = get_services(jaeger_port)
assert set(services) == {'executor0/rep-0', 'gateway/rep-0'}
exported_jobs = get_exported_jobs(prometheus_client)
assert exported_jobs == {
'gateway/rep-0',
'executor0/rep-0',
}
flow_metric_labels = get_flow_metric_labels(prometheus_client)
assert flow_metric_labels.issubset(expected_flow_metric_labels)
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y, mode)
expected = torch.stack(
[
torch.stack(
[fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0), mode).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
expected = []
for i in range(leading_dims[0]):
for j in range(leading_dims[1]):
for k in range(leading_dims[2]):
expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], lengths[i][j][k], snr[i][j][k]))
self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
def test_speed(self):
B = 5
orig_freq = 100
factor = 0.8
input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)
output, output_lengths = F.speed(batched_input, input_lengths, orig_freq=orig_freq, factor=factor)
unbatched_output = []
unbatched_output_lengths = []
for idx in range(len(unbatched_input)):
w, l = F.speed(unbatched_input[idx], input_lengths[idx], orig_freq=orig_freq, factor=factor)
unbatched_output.append(w)
unbatched_output_lengths.append(l)
self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
for idx in range(len(unbatched_output)):
w, l = output[idx], output_lengths[idx]
self.assertEqual(unbatched_output[idx], w[:l])
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y, mode)
expected = torch.stack(
[
torch.stack(
[fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0), mode).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
expected = []
for i in range(leading_dims[0]):
for j in range(leading_dims[1]):
for k in range(leading_dims[2]):
expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], lengths[i][j][k], snr[i][j][k]))
self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
|
from abc import ABC, abstractmethod
from typing import Dict, List
import torch
import torchaudio.functional as F
from torch import Tensor
from torchaudio.functional import TokenSpan
class ITokenizer(ABC):
@abstractmethod
def __call__(self, transcript: List[str]) -> List[List[str]]:
"""Tokenize the given transcript (list of word)
.. note::
The toranscript must be normalized.
Args:
transcript (list of str): Transcript (list of word).
Returns:
(list of int): List of token sequences
"""
class Tokenizer(ITokenizer):
def __init__(self, dictionary: Dict[str, int]):
self.dictionary = dictionary
def __call__(self, transcript: List[str]) -> List[List[int]]:
return [[self.dictionary[c] for c in word] for word in transcript]
def _align_emission_and_tokens(emission: Tensor, tokens: List[int], blank: int = 0):
device = emission.device
emission = emission.unsqueeze(0)
targets = torch.tensor([tokens], dtype=torch.int32, device=device)
aligned_tokens, scores = F.forced_align(emission, targets, blank=blank)
scores = scores.exp() # convert back to probability
aligned_tokens, scores = aligned_tokens[0], scores[0] # remove batch dimension
return aligned_tokens, scores
class IAligner(ABC):
@abstractmethod
def __call__(self, emission: Tensor, tokens: List[List[int]]) -> List[List[TokenSpan]]:
"""Generate list of time-stamped token sequences
Args:
emission (Tensor): Sequence of token probability distributions in log-domain.
Shape: `(time, tokens)`.
tokens (list of integer sequence): Tokenized transcript.
Output from :py:class:`torchaudio.pipelines.Wav2Vec2FABundle.Tokenizer`.
Returns:
(list of TokenSpan sequence): Tokens with time stamps and scores.
"""
def _unflatten(list_, lengths):
assert len(list_) == sum(lengths)
i = 0
ret = []
for l in lengths:
ret.append(list_[i : i + l])
i += l
return ret
def _flatten(nested_list):
return [item for list_ in nested_list for item in list_]
class Aligner(IAligner):
def __init__(self, blank):
self.blank = blank
def __call__(self, emission: Tensor, tokens: List[List[int]]) -> List[List[TokenSpan]]:
if emission.ndim != 2:
raise ValueError(f"The input emission must be 2D. Found: {emission.shape}")
aligned_tokens, scores = _align_emission_and_tokens(emission, _flatten(tokens), self.blank)
spans = F.merge_tokens(aligned_tokens, scores)
return _unflatten(spans, [len(ts) for ts in tokens])
|
from abc import ABC, abstractmethod
from typing import Dict, List
import torch
import torchaudio.functional as F
from torch import Tensor
from torchaudio.functional import TokenSpan
class ITokenizer(ABC):
@abstractmethod
def __call__(self, transcript: List[str]) -> List[List[str]]:
"""Tokenize the given transcript (list of word)
.. note::
The toranscript must be normalized.
Args:
transcript (list of str): Transcript (list of word).
Returns:
(list of int): List of token sequences
"""
class Tokenizer(ITokenizer):
def __init__(self, dictionary: Dict[str, int]):
self.dictionary = dictionary
def __call__(self, transcript: List[str]) -> List[List[int]]:
return [[self.dictionary[c] for c in word] for word in transcript]
def _align_emission_and_tokens(emission: Tensor, tokens: List[int], blank: int = 0):
device = emission.device
emission = emission.unsqueeze(0)
targets = torch.tensor([tokens], dtype=torch.int32, device=device)
aligned_tokens, scores = F.forced_align(emission, targets, blank=blank)
scores = scores.exp() # convert back to probability
aligned_tokens, scores = aligned_tokens[0], scores[0] # remove batch dimension
return aligned_tokens, scores
class IAligner(ABC):
@abstractmethod
def __call__(self, emission: Tensor, tokens: List[List[int]]) -> List[List[TokenSpan]]:
"""Generate list of time-stamped token sequences
Args:
emission (Tensor): Sequence of token probability distributions in log-domain.
Shape: `(time, tokens)`.
tokens (list of integer sequence): Tokenized transcript.
Output from :py:class:`Wav2Vec2FABundle.Tokenizer`.
Returns:
(list of TokenSpan sequence): Tokens with time stamps and scores.
"""
def _unflatten(list_, lengths):
assert len(list_) == sum(lengths)
i = 0
ret = []
for l in lengths:
ret.append(list_[i : i + l])
i += l
return ret
def _flatten(nested_list):
return [item for list_ in nested_list for item in list_]
class Aligner(IAligner):
def __init__(self, blank):
self.blank = blank
def __call__(self, emission: Tensor, tokens: List[List[int]]) -> List[List[TokenSpan]]:
if emission.ndim != 2:
raise ValueError(f"The input emission must be 2D. Found: {emission.shape}")
aligned_tokens, scores = _align_emission_and_tokens(emission, _flatten(tokens), self.blank)
spans = F.merge_tokens(aligned_tokens, scores)
return _unflatten(spans, [len(ts) for ts in tokens])
|
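A quick check of the Tokenizer and _unflatten helpers defined above, with a hypothetical character dictionary:

# Quick check of Tokenizer and _unflatten with a hypothetical dictionary.
dictionary = {"a": 1, "b": 2, "c": 3}
tokenizer = Tokenizer(dictionary)
print(tokenizer(["ab", "c"]))            # [[1, 2], [3]]
print(_unflatten([10, 20, 30], [2, 1]))  # [[10, 20], [30]]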
from typing import Optional
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from zyte_api import ZyteAPI
from zyte_api.utils import USER_AGENT as PYTHON_ZYTE_API_USER_AGENT
class ZyteSerpReader(BasePydanticReader):
"""
    Get Google search result URLs for a search query.
Args:
api_key: Zyte API key.
extract_from: Determines the mode while extracting the search results.
It can take one of the following values: 'httpResponseBody', 'browserHtml'
Example:
.. code-block:: python
from llama_index.readers.zyte_serp import ZyteSerpReader
reader = ZyteSerpReader(
api_key="ZYTE_API_KEY",
)
docs = reader.load_data(
"search query",
)
Zyte-API reference:
https://docs.zyte.com/zyte-api/get-started.html
"""
client: ZyteAPI
api_key: str
extract_from: Optional[str]
def __init__(
self,
api_key: str,
extract_from: Optional[str] = None,
) -> None:
"""Initialize with file path."""
user_agent = f"llama-index-zyte-api/{PYTHON_ZYTE_API_USER_AGENT}"
client = ZyteAPI(
api_key=api_key,
user_agent=user_agent,
)
super().__init__(
api_key=api_key,
extract_from=extract_from,
client=client,
)
def _serp_url(self, query: str):
from urllib.parse import quote_plus
base_url = "https://www.google.com/search?q="
return base_url + quote_plus(query)
def load_data(self, query: str):
serp_url = self._serp_url(query)
serp_request = {
"url": serp_url,
"serp": True,
}
if self.extract_from:
serp_request.update({"serpOptions": {"extractFrom": self.extract_from}})
results = self.client.get(serp_request)
docs = []
for result in results["serp"]["organicResults"]:
doc = Document(
text=result["url"],
metadata={"name": result["name"], "rank": result["rank"]},
)
docs.append(doc)
return docs
|
from typing import Optional
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from zyte_api import ZyteAPI
from zyte_api.utils import USER_AGENT as PYTHON_ZYTE_API_USER_AGENT
class ZyteSerpReader(BasePydanticReader):
"""Get google search results URLs for a search query.
Args:
api_key: Zyte API key.
extract_from: Determines the mode while extracting the search results.
It can take one of the following values: 'httpResponseBody', 'browserHtml'
Example:
.. code-block:: python
from llama_index.readers.zyte_serp import ZyteSerpReader
reader = ZyteSerpReader(
api_key="ZYTE_API_KEY",
)
docs = reader.load_data(
"search query",
)
Zyte-API reference:
https://docs.zyte.com/zyte-api/get-started.html
"""
client: ZyteAPI
api_key: str
extract_from: Optional[str]
def __init__(
self,
api_key: str,
extract_from: Optional[str] = None,
) -> None:
"""Initialize with file path."""
user_agent = f"llama-index-zyte-api/{PYTHON_ZYTE_API_USER_AGENT}"
client = ZyteAPI(
api_key=api_key,
user_agent=user_agent,
)
super().__init__(
api_key=api_key,
extract_from=extract_from,
client=client,
)
def _serp_url(self, query: str):
from urllib.parse import quote_plus
base_url = "https://www.google.com/search?q="
return base_url + quote_plus(query)
def load_data(self, query: str):
serp_url = self._serp_url(query)
serp_request = {
"url": serp_url,
"serp": True,
}
if self.extract_from:
serp_request.update({"serpOptions": {"extractFrom": self.extract_from}})
results = self.client.get(serp_request)
docs = []
for result in results["serp"]["organicResults"]:
doc = Document(
text=result["url"],
metadata={"name": result["name"], "rank": result["rank"]},
)
docs.append(doc)
return docs
|
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
warnings.warn(
"The Pandas builder is deprecated and will be removed in the next major version of datasets.",
FutureWarning,
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
|
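A hedged usage sketch for the packaged builder above; `train.pkl` is a hypothetical pickled `pandas.DataFrame`:

# Usage sketch: the "pandas" packaged builder reads pickled DataFrames (see _generate_tables).
import pandas as pd
from datasets import load_dataset

pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")  # hypothetical file
ds = load_dataset("pandas", data_files={"train": "train.pkl"})
print(ds["train"][0])  # {'text': 'a', 'label': 0}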
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
    You can also pass a `DTypePolicy` config to this function as a dict
    containing `class_name` and `config` keys; `class_name` must map to a
    `DTypePolicy` class.
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
        identifier: A dtype policy identifier. One of `None`, the string name of
            a `DTypePolicy`, a `DTypePolicy` configuration dictionary, or a
            `DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(
identifier, (DTypePolicy, FloatDTypePolicy, QuantizedDTypePolicy)
):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
    You can also pass a `DTypePolicy` config to this function as a dict
    containing `class_name` and `config` keys; `class_name` must map to a
    `DTypePolicy` class.
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
        identifier: A dtype policy identifier. One of `None`, the string name of
            a `DTypePolicy`, a `DTypePolicy` configuration dictionary, or a
            `DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, (FloatDTypePolicy, QuantizedDTypePolicy)):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
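A round-trip sketch for the `get`/`serialize`/`deserialize` helpers above (Keras 3 assumed):

# Round-trip sketch for the exported helpers above.
from keras import dtype_policies

policy = dtype_policies.get("mixed_bfloat16")
config = dtype_policies.serialize(policy)
restored = dtype_policies.deserialize(config)
print(type(restored).__name__, restored.name)  # a float policy named "mixed_bfloat16"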
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
myst_enable_extensions = ['colon_fence']
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/zh_CN/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./cp_origin_docs.sh'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
myst_enable_extensions = ['colon_fence']
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/zh_CN/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./cp_origin_docs.sh'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def teardown(self):
await super().teardown()
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
async def stop_server(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorBeforeReload'
@requests(on='/bar')
def bar(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorBeforeReloadBar'
|
from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorBeforeReload'
|
import random
from pathlib import Path
from typing import Callable, Dict, Tuple
import opentelemetry.sdk.metrics.view
import pytest
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
MetricExporter,
MetricExportResult,
MetricsData,
PeriodicExportingMetricReader,
)
class DirMetricExporter(MetricExporter):
"""Implementation of :class:`MetricExporter` that prints metrics to a file in a given directory.
This class can be used for diagnostic or testing purposes.
"""
def __init__(
self,
metric_dir: str,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk.metrics.view.Aggregation"
] = None,
):
print(f'JOAN IS HERE DIRMETRIC')
super().__init__(
preferred_temporality=preferred_temporality,
preferred_aggregation=preferred_aggregation,
)
self.metric_filename: Path = Path(metric_dir) / str(random.randint(0, 1048575))
self.f = open(self.metric_filename, 'a')
def export(
self,
metrics_data: MetricsData,
timeout_millis: float = 10_000,
**kwargs,
) -> MetricExportResult:
print(f'export to {self.metric_filename} => {metrics_data.to_json()[0:3]}')
self.f.write(metrics_data.to_json())
self.f.write('\n')
self.f.flush()
return MetricExportResult.SUCCESS
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
pass
def force_flush(self, timeout_millis: float = 10_000) -> bool:
return True
def __del__(self):
self.f.close()
@pytest.fixture(scope='function')
def monkeypatch_metric_exporter(
tmpdir_factory: pytest.TempdirFactory,
) -> Tuple[Callable, Callable]:
import json
import os
import time
from pathlib import Path
import opentelemetry.sdk.metrics.export
collect_path = Path(tmpdir_factory.mktemp('otel-collector'))
metrics_path = collect_path / 'metrics'
os.mkdir(metrics_path)
tick_counter_filename = collect_path / 'tick_counter'
with open(tick_counter_filename, 'w', encoding='utf-8') as f:
f.write('0')
def collect_metrics():
print(f'tick_counter_filename {tick_counter_filename}')
with open(tick_counter_filename, 'r', encoding='utf-8') as ft:
tick_counter = int(ft.read())
with open(tick_counter_filename, 'w', encoding='utf-8') as ft2:
ft2.write(str(tick_counter + 1))
time.sleep(2)
def _get_service_name(otel_measurement):
return otel_measurement['resource_metrics'][0]['resource']['attributes'][
'service.name'
]
def read_metrics():
def read_metric_file(filename):
print(f'filename {filename}')
with open(filename, 'r', encoding='utf-8') as fr:
r = fr.read()
print(f'READ {r[0:3]}')
try:
return json.loads(r)
            except json.JSONDecodeError:
return None
ret = {}
for i in map(read_metric_file, metrics_path.glob('*')):
if i is not None:
ret[_get_service_name(i)] = i
return ret
class PatchedTextReader(PeriodicExportingMetricReader):
def __init__(self, *args, **kwargs) -> None:
self.exporter = DirMetricExporter(metrics_path)
self.tick_counter = 0
super().__init__(
exporter=self.exporter,
export_interval_millis=500,
)
def _ticker(self) -> None:
interval_secs = self._export_interval_millis / 1e3
while not self._shutdown_event.wait(interval_secs):
with open(tick_counter_filename, 'r', encoding='utf-8') as f:
tick_counter = int(f.read())
if tick_counter != self.tick_counter:
self.tick_counter = tick_counter
self.collect(timeout_millis=self._export_timeout_millis)
self.collect(timeout_millis=self._export_interval_millis)
real_reader = opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = PatchedTextReader
yield collect_metrics, read_metrics
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = real_reader
|
import random
from pathlib import Path
from typing import Callable, Dict, Tuple
import opentelemetry.sdk.metrics.export
import opentelemetry.sdk.metrics.view
import pytest
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
MetricExporter,
MetricExportResult,
MetricsData,
PeriodicExportingMetricReader,
)
class DirMetricExporter(MetricExporter):
"""Implementation of :class:`MetricExporter` that prints metrics to a file in a given directory.
This class can be used for diagnostic or testing purposes.
"""
def __init__(
self,
metric_dir: str,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk.metrics.view.Aggregation"
] = None,
):
super().__init__(
preferred_temporality=preferred_temporality,
preferred_aggregation=preferred_aggregation,
)
self.metric_filename: Path = Path(metric_dir) / str(random.randint(0, 1048575))
self.f = open(self.metric_filename, 'a')
def export(
self,
metrics_data: MetricsData,
timeout_millis: float = 10_000,
**kwargs,
) -> MetricExportResult:
self.f.write(metrics_data.to_json())
self.f.write('\n')
self.f.flush()
return MetricExportResult.SUCCESS
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
pass
def force_flush(self, timeout_millis: float = 10_000) -> bool:
return True
def __del__(self):
self.f.close()
@pytest.fixture(scope='function')
def monkeypatch_metric_exporter(
tmpdir_factory: pytest.TempdirFactory,
) -> Tuple[Callable, Callable]:
import json
import os
import time
from pathlib import Path
import opentelemetry.sdk.metrics.export
collect_path = Path(tmpdir_factory.mktemp('otel-collector'))
metrics_path = collect_path / 'metrics'
os.mkdir(metrics_path)
tick_counter_filename = collect_path / 'tick_counter'
with open(tick_counter_filename, 'w', encoding='utf-8') as f:
f.write('0')
def collect_metrics():
with open(tick_counter_filename, 'r', encoding='utf-8') as f:
tick_counter = int(f.read())
with open(tick_counter_filename, 'w', encoding='utf-8') as f:
f.write(str(tick_counter + 1))
time.sleep(2)
def _get_service_name(otel_measurement):
return otel_measurement['resource_metrics'][0]['resource']['attributes'][
'service.name'
]
def read_metrics():
def read_metric_file(filename):
with open(filename, 'r', encoding='utf-8') as f:
return json.loads(f.read())
return {
_get_service_name(i): i
for i in map(read_metric_file, metrics_path.glob('*'))
}
class PatchedTextReader(PeriodicExportingMetricReader):
def __init__(self, *args, **kwargs) -> None:
self.exporter = DirMetricExporter(metrics_path)
self.tick_counter = 0
super().__init__(
exporter=self.exporter,
export_interval_millis=500,
)
def _ticker(self) -> None:
interval_secs = self._export_interval_millis / 1e3
while not self._shutdown_event.wait(interval_secs):
with open(tick_counter_filename, 'r', encoding='utf-8') as f:
tick_counter = int(f.read())
if tick_counter != self.tick_counter:
self.tick_counter = tick_counter
self.collect(timeout_millis=self._export_timeout_millis)
self.collect(timeout_millis=self._export_interval_millis)
real_reader = opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = PatchedTextReader
yield collect_metrics, read_metrics
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = real_reader
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
import os
import pytest
from llama_index.llms.nvidia import NVIDIA
from typing import Any
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url="https://test_url/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key() -> None:
NVIDIA()
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
NVIDIA(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = NVIDIA(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(NVIDIA()) == "ENV"
assert get_api_key(NVIDIA(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_missing_api_key_error(masked_env_var: str) -> None:
with pytest.warns(UserWarning):
client = NVIDIA()
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = NVIDIA(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(chat_model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = NVIDIA(model=chat_model, **{**mode, **{param: masked_env_var}})
assert client.complete("Hello, world!").text
|
import os
import pytest
from llama_index.llms.nvidia import NVIDIA
from typing import Any
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url="https://test_url/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.warns(UserWarning):
NVIDIA()
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
NVIDIA(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = NVIDIA(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(NVIDIA()) == "ENV"
assert get_api_key(NVIDIA(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_missing_api_key_error(masked_env_var: str) -> None:
with pytest.warns(UserWarning):
client = NVIDIA()
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = NVIDIA(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(chat_model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = NVIDIA(model=chat_model, **{**mode, **{param: masked_env_var}})
assert client.complete("Hello, world!").text
|
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
        assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
import glob
import os
from datetime import datetime
import pytest
from jina import Document, Flow, __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
        assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs
):
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path).to(device).eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
        # traverse through all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without audio wav
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # already at the target rate; return the blob unchanged
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Optional, Iterable, Any
from jina import Executor, DocumentArray, requests
from jina.excepts import BadDocType
import librosa as lr
import numpy as np
import torch
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ['r'],
*args,
**kwargs
):
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path)
self.aclp.eval()
self.aclp.audio.eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
        # traverse through all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without audio wav
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # already at the target rate; return the blob unchanged
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
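# Hedged usage sketch (illustrative addition, not part of the original file):
# encode one in-memory waveform with this executor, assuming the pre-trained
# checkpoint exists at the default ``model_path`` and the Jina 2.x Document API
# (ndarray ``blob``) is in use.
if __name__ == "__main__":
    from jina import Document

    encoder = AudioCLIPEncoder()
    docs = DocumentArray(
        [
            Document(
                blob=np.random.randn(44100).astype(np.float32),
                tags={'sample_rate': 44100},
            )
        ]
    )
    encoder.encode(docs=docs, parameters={})
    print(docs[0].embedding.shape)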
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# TODO: use repeat dataset wrapper
# training schedule for 3x
train_cfg = dict(by_epoch=True, max_epochs=36)
val_cfg = dict(interval=3)
test_cfg = dict()
# learning rate
# Experiments show that using milestones=[27, 33] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""*LJSpeech-1.1* :cite:`ljspeech17` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
str:
Normalized Transcript
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
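# Hedged usage sketch (illustrative addition, not part of the original file):
# load the first sample from LJSpeech, assuming the archive can be downloaded
# into ``./data`` (download=True fetches and extracts it on first use).
if __name__ == "__main__":
    dataset = LJSPEECH(root="./data", download=True)
    waveform, sample_rate, transcript, normalized_transcript = dataset[0]
    print(waveform.shape, sample_rate, transcript)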
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for *LJSpeech-1.1* :cite:`ljspeech17`.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
import numpy as np
from docarray import Image
def test_image():
image = Image(url='http://jina.ai')
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
import numpy as np
from docarray import Image
from docarray.typing import Tensor
def test_image():
image = Image(uri='http://jina.ai')
image.tensor = image.uri.load()
assert isinstance(image.tensor, np.ndarray)
|
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
if issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
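# Hedged usage sketch (illustrative addition, not part of the original module):
# parse a JSON completion string into a pydantic model, assuming pydantic v2.
if __name__ == "__main__":

    class _Joke(pydantic.BaseModel):
        setup: str
        punchline: str

    parser = PydanticOutputParser(pydantic_object=_Joke)
    result = parser.parse(
        '{"setup": "Why did the chicken cross the road?", '
        '"punchline": "To get to the other side."}'
    )
    print(result)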
|
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
IS_PYDANTIC_V2,
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
if IS_PYDANTIC_V2:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
if issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1
try:
return self.pydantic_object.parse_obj(obj)
except pydantic.ValidationError as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.planner_prompt import (
API_CONTROLLER_PROMPT,
API_CONTROLLER_TOOL_DESCRIPTION,
API_CONTROLLER_TOOL_NAME,
API_ORCHESTRATOR_PROMPT,
API_PLANNER_PROMPT,
API_PLANNER_TOOL_DESCRIPTION,
API_PLANNER_TOOL_NAME,
PARSING_DELETE_PROMPT,
PARSING_GET_PROMPT,
PARSING_PATCH_PROMPT,
PARSING_POST_PROMPT,
PARSING_PUT_PROMPT,
REQUESTS_DELETE_TOOL_DESCRIPTION,
REQUESTS_GET_TOOL_DESCRIPTION,
REQUESTS_PATCH_TOOL_DESCRIPTION,
REQUESTS_POST_TOOL_DESCRIPTION,
REQUESTS_PUT_TOOL_DESCRIPTION,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"API_CONTROLLER_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_CONTROLLER_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_CONTROLLER_TOOL_NAME": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_ORCHESTRATOR_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_PLANNER_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"),
"API_PLANNER_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_PLANNER_TOOL_NAME": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_DELETE_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_GET_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"),
"PARSING_PATCH_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_POST_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_PUT_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"),
"REQUESTS_DELETE_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_GET_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_PATCH_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_POST_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_PUT_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"API_CONTROLLER_PROMPT",
"API_CONTROLLER_TOOL_DESCRIPTION",
"API_CONTROLLER_TOOL_NAME",
"API_ORCHESTRATOR_PROMPT",
"API_PLANNER_PROMPT",
"API_PLANNER_TOOL_DESCRIPTION",
"API_PLANNER_TOOL_NAME",
"PARSING_DELETE_PROMPT",
"PARSING_GET_PROMPT",
"PARSING_PATCH_PROMPT",
"PARSING_POST_PROMPT",
"PARSING_PUT_PROMPT",
"REQUESTS_DELETE_TOOL_DESCRIPTION",
"REQUESTS_GET_TOOL_DESCRIPTION",
"REQUESTS_PATCH_TOOL_DESCRIPTION",
"REQUESTS_POST_TOOL_DESCRIPTION",
"REQUESTS_PUT_TOOL_DESCRIPTION",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.planner_prompt import (
API_CONTROLLER_PROMPT,
API_CONTROLLER_TOOL_DESCRIPTION,
API_CONTROLLER_TOOL_NAME,
API_ORCHESTRATOR_PROMPT,
API_PLANNER_PROMPT,
API_PLANNER_TOOL_DESCRIPTION,
API_PLANNER_TOOL_NAME,
PARSING_DELETE_PROMPT,
PARSING_GET_PROMPT,
PARSING_PATCH_PROMPT,
PARSING_POST_PROMPT,
PARSING_PUT_PROMPT,
REQUESTS_DELETE_TOOL_DESCRIPTION,
REQUESTS_GET_TOOL_DESCRIPTION,
REQUESTS_PATCH_TOOL_DESCRIPTION,
REQUESTS_POST_TOOL_DESCRIPTION,
REQUESTS_PUT_TOOL_DESCRIPTION,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"API_CONTROLLER_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_CONTROLLER_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_CONTROLLER_TOOL_NAME": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_ORCHESTRATOR_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_PLANNER_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"),
"API_PLANNER_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"API_PLANNER_TOOL_NAME": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_DELETE_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_GET_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"),
"PARSING_PATCH_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_POST_PROMPT": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"PARSING_PUT_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"),
"REQUESTS_DELETE_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_GET_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_PATCH_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_POST_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
"REQUESTS_PUT_TOOL_DESCRIPTION": (
"langchain_community.agent_toolkits.openapi.planner_prompt"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"API_PLANNER_PROMPT",
"API_PLANNER_TOOL_NAME",
"API_PLANNER_TOOL_DESCRIPTION",
"API_CONTROLLER_PROMPT",
"API_CONTROLLER_TOOL_NAME",
"API_CONTROLLER_TOOL_DESCRIPTION",
"API_ORCHESTRATOR_PROMPT",
"REQUESTS_GET_TOOL_DESCRIPTION",
"PARSING_GET_PROMPT",
"REQUESTS_POST_TOOL_DESCRIPTION",
"PARSING_POST_PROMPT",
"REQUESTS_PATCH_TOOL_DESCRIPTION",
"PARSING_PATCH_PROMPT",
"REQUESTS_PUT_TOOL_DESCRIPTION",
"PARSING_PUT_PROMPT",
"REQUESTS_DELETE_TOOL_DESCRIPTION",
"PARSING_DELETE_PROMPT",
]
|
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.sampler import DefaultBatchSampler, MultiDatasetDefaultBatchSampler
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"DefaultBatchSampler",
"MultiDatasetDefaultBatchSampler",
"mine_hard_negatives",
]
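# Hedged usage sketch (the checkpoint name below is illustrative, not part of this
# package): the re-exported classes above are the primary entry points, e.g.
#
#     from sentence_transformers import SentenceTransformer
#
#     model = SentenceTransformer("all-MiniLM-L6-v2")
#     embeddings = model.encode(["The weather is lovely.", "It is sunny outside."])
#     print(model.similarity(embeddings, embeddings))  # 2x2 cosine similarity matrix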
|
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.sampler import DefaultBatchSampler, MultiDatasetDefaultBatchSampler
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"DefaultBatchSampler",
"MultiDatasetDefaultBatchSampler",
"mine_hard_negatives",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import force_fp32
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
init_cfg=None):
super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
featmap_strides, init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
if torch.onnx.is_in_onnx_export():
            # Workaround to export Mask R-CNN to ONNX
roi_feats = rois[:, :1].clone().detach()
roi_feats = roi_feats.expand(*expand_dims)
roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
roi_feats = roi_feats * 0
else:
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
if torch.onnx.is_in_onnx_export():
# To keep all roi_align nodes exported to onnx
# and skip nonzero op
mask = mask.float().unsqueeze(-1)
# select target level rois and reset the rest rois to zero.
rois_i = rois.clone().detach()
rois_i *= mask
mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
roi_feats_t = self.roi_layers[i](feats[i], rois_i)
roi_feats_t *= mask_exp
roi_feats += roi_feats_t
continue
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
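if __name__ == '__main__':
    # Hedged illustration (not part of the extractor API): reproduce the
    # scale-to-level mapping rule documented in ``map_roi_levels`` for a few
    # hand-picked RoIs. With the default ``finest_scale=56``, boxes smaller than
    # 112 px map to level 0, 112-224 px to level 1, 224-448 px to level 2 and
    # larger boxes to level 3.
    finest_scale, num_levels = 56, 4
    rois = torch.tensor([
        [0., 0., 0., 64., 64.],    # scale 64  -> level 0
        [0., 0., 0., 150., 150.],  # scale 150 -> level 1
        [0., 0., 0., 300., 300.],  # scale 300 -> level 2
        [0., 0., 0., 600., 600.],  # scale 600 -> level 3
    ])
    scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
    target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
    print(target_lvls.clamp(min=0, max=num_levels - 1).long())  # tensor([0, 1, 2, 3])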
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
init_cfg=None):
super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
featmap_strides, init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
if torch.onnx.is_in_onnx_export():
            # Workaround to export Mask R-CNN to ONNX
roi_feats = rois[:, :1].clone().detach()
roi_feats = roi_feats.expand(*expand_dims)
roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
roi_feats = roi_feats * 0
else:
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
if torch.onnx.is_in_onnx_export():
# To keep all roi_align nodes exported to onnx
# and skip nonzero op
mask = mask.float().unsqueeze(-1)
# select target level rois and reset the rest rois to zero.
rois_i = rois.clone().detach()
rois_i *= mask
mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
roi_feats_t = self.roi_layers[i](feats[i], rois_i)
roi_feats_t *= mask_exp
roi_feats += roi_feats_t
continue
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.encode_query.side_effect = mock_encode
model.encode_document.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Warpper of mse loss."""
return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
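if __name__ == '__main__':
    # Hedged usage sketch (values are illustrative): the per-element ``weight``
    # tensor lets the ``weighted_loss`` wrapper mask out entries, and ``avg_factor``
    # overrides the denominator used when averaging.
    import torch
    pred = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.0, 0.0, 3.0])
    weight = torch.tensor([1.0, 1.0, 0.0])  # ignore the last element
    criterion = MSELoss(reduction='mean', loss_weight=1.0)
    print(criterion(pred, target))                          # plain mean -> ~1.3333
    print(criterion(pred, target, weight, avg_factor=2.0))  # (0 + 4 + 0) / 2 -> ~2.0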
|
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Warpper of mse loss."""
return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
|
from __future__ import annotations
import math
from pathlib import Path
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
output = static_embedding_model(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding_model: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding_model.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding_model.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
# The shape has been 29528 for <0.5.0, 29525 for 0.5.0, and 29524 for >=0.6.0, so let's make a safer test
# that checks the first dimension is close to 29525 and the second dimension is 32.
assert abs(model.embedding.weight.shape[0] - 29525) < 5
assert model.embedding.weight.shape[1] == 32
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
import datasets
_DESCRIPTION = """\
"""
_URL = "https://www.gutenberg.org/files/2554/2554-h/2554-h.htm"
_DATA_URL = "https://raw.githubusercontent.com/patrickvonplaten/datasets/master/crime_and_punishment.txt"
class CrimeAndPunish(datasets.GeneratorBasedBuilder):
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# datasets.features.FeatureConnectors
features=datasets.Features(
{
"line": datasets.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
homepage=_URL,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data = dl_manager.download_and_extract(_DATA_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"data_file": data, "split": "train"},
),
]
def _generate_examples(self, data_file, split):
with open(data_file, "rb") as f:
id_counter = 0
add_text = False
crime_and_punishment_occ_counter = 0
for line in f:
line = line.decode("UTF-8")
if "CRIME AND PUNISHMENT" in line:
crime_and_punishment_occ_counter += 1
add_text = crime_and_punishment_occ_counter == 3
if "End of Project" in line:
add_text = False
if add_text is True:
result = {"line": line}
id_counter += 1
yield id_counter, result
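# Hedged usage sketch (the script path is hypothetical): dataset scripts like this
# one are typically consumed by pointing ``datasets.load_dataset`` at the file, in
# datasets versions that still support local loading scripts, e.g.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/crime_and_punish.py", split="train")
#     print(ds[0]["line"])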
|
import datasets
_DESCRIPTION = """\
"""
_URL = "https://www.gutenberg.org/files/2554/2554-h/2554-h.htm"
_DATA_URL = "https://raw.githubusercontent.com/patrickvonplaten/datasets/master/crime_and_punishment.txt"
class CrimeAndPunishConfig(datasets.BuilderConfig):
"""BuilderConfig for Crime and Punish."""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for BlogAuthorship
Args:
data_url: `string`, url to the dataset (word or raw level)
**kwargs: keyword arguments forwarded to super.
"""
super(CrimeAndPunishConfig, self).__init__(
version=datasets.Version(
"1.0.0",
),
**kwargs,
)
self.data_url = data_url
class CrimeAndPunish(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.1.0")
BUILDER_CONFIGS = [
CrimeAndPunishConfig(
name="crime-and-punish",
data_url=_DATA_URL,
description="word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
),
]
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# datasets.features.FeatureConnectors
features=datasets.Features(
{
"line": datasets.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
homepage=_URL,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
if self.config.name == "crime-and-punish":
data = dl_manager.download_and_extract(self.config.data_url)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"data_file": data, "split": "train"},
),
]
else:
raise ValueError(f"{self.config.name} does not exist")
def _generate_examples(self, data_file, split):
with open(data_file, "rb") as f:
id_counter = 0
add_text = False
crime_and_punishment_occ_counter = 0
for line in f:
line = line.decode("UTF-8")
if "CRIME AND PUNISHMENT" in line:
crime_and_punishment_occ_counter += 1
add_text = crime_and_punishment_occ_counter == 3
if "End of Project" in line:
add_text = False
if add_text is True:
result = {"line": line}
id_counter += 1
yield id_counter, result
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from sentence_transformers import SentenceTransformer
class TransformerSentenceEncoder(Executor):
"""
    Encode the Document text into an embedding.
"""
def __init__(
self,
model_name: str = 'all-MiniLM-L6-v2',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_name: The name of the sentence transformer to be used
:param device: Torch device to put the model on (e.g. 'cpu', 'cuda', 'cuda:1')
:param traversal_paths: Default traversal paths
:param batch_size: Batch size to be used in the encoder model
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.model = SentenceTransformer(model_name, device=device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the ``embedding`` attribute
of the docs.
:param docs: Documents to send to the encoder. They need to have the ``text``
            attribute to get an embedding.
:param parameters: Any additional parameters for the `encode` function.
"""
if docs is None:
return
for batch in docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
require_attr='text',
):
texts = batch.get_attributes('text')
with torch.inference_mode():
embeddings = self.model.encode(texts)
for doc, embedding in zip(batch, embeddings):
doc.embedding = embedding
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from sentence_transformers import SentenceTransformer
class TransformerSentenceEncoder(Executor):
"""
    Encode the Document text into an embedding.
"""
def __init__(
self,
model_name: str = 'all-MiniLM-L6-v2',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_name: The name of the sentence transformer to be used
:param device: Torch device to put the model on (e.g. 'cpu', 'cuda', 'cuda:1')
:param traversal_paths: Default traversal paths
:param batch_size: Batch size to be used in the encoder model
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.model = SentenceTransformer(model_name, device=device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the ``embedding`` attribute
of the docs.
:param docs: Documents to send to the encoder. They need to have the ``text``
            attribute to get an embedding.
:param parameters: Any additional parameters for the `encode` function.
"""
for batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
needs_attr='text',
):
texts = batch.get_attributes('text')
with torch.no_grad():
embeddings = self.model.encode(texts)
for doc, embedding in zip(batch, embeddings):
doc.embedding = embedding
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset'
]
|
import os
from pathlib import Path
from torchaudio.datasets import cmuarctic
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_dataset(root_dir):
"""
    root_dir: directory where the mocked dataset is stored
"""
mocked_data = []
sample_rate = 16000
transcript = "This is a test transcript."
base_dir = os.path.join(root_dir, "ARCTIC", "cmu_us_aew_arctic")
txt_dir = os.path.join(base_dir, "etc")
os.makedirs(txt_dir, exist_ok=True)
txt_file = os.path.join(txt_dir, "txt.done.data")
audio_dir = os.path.join(base_dir, "wav")
os.makedirs(audio_dir, exist_ok=True)
seed = 42
with open(txt_file, "w") as txt:
for c in ["a", "b"]:
for i in range(5):
utterance_id = f"arctic_{c}{i:04d}"
path = os.path.join(audio_dir, f"{utterance_id}.wav")
data = get_whitenoise(
sample_rate=sample_rate,
duration=3,
n_channels=1,
dtype="int16",
seed=seed,
)
save_wav(path, data, sample_rate)
sample = (
normalize_wav(data),
sample_rate,
transcript,
utterance_id.split("_")[1],
)
mocked_data.append(sample)
txt.write(f'( {utterance_id} "{transcript}" )\n')
seed += 1
return mocked_data
class TestCMUARCTIC(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_cmuarctic(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, utterance_id) in enumerate(dataset):
expected_sample = self.samples[i]
assert sample_rate == expected_sample[1]
assert transcript == expected_sample[2]
assert utterance_id == expected_sample[3]
self.assertEqual(expected_sample[0], waveform, atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.samples)
def test_cmuarctic_str(self):
dataset = cmuarctic.CMUARCTIC(self.root_dir)
self._test_cmuarctic(dataset)
def test_cmuarctic_path(self):
dataset = cmuarctic.CMUARCTIC(Path(self.root_dir))
self._test_cmuarctic(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import cmuarctic
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
def get_mock_dataset(root_dir):
"""
    root_dir: directory where the mocked dataset is stored
"""
mocked_data = []
sample_rate = 16000
transcript = "This is a test transcript."
base_dir = os.path.join(root_dir, "ARCTIC", "cmu_us_aew_arctic")
txt_dir = os.path.join(base_dir, "etc")
os.makedirs(txt_dir, exist_ok=True)
txt_file = os.path.join(txt_dir, "txt.done.data")
audio_dir = os.path.join(base_dir, "wav")
os.makedirs(audio_dir, exist_ok=True)
seed = 42
with open(txt_file, "w") as txt:
for c in ["a", "b"]:
for i in range(5):
utterance_id = f"arctic_{c}{i:04d}"
path = os.path.join(audio_dir, f"{utterance_id}.wav")
data = get_whitenoise(
sample_rate=sample_rate,
duration=3,
n_channels=1,
dtype="int16",
seed=seed,
)
save_wav(path, data, sample_rate)
sample = (
normalize_wav(data),
sample_rate,
transcript,
utterance_id.split("_")[1],
)
mocked_data.append(sample)
txt.write(f'( {utterance_id} "{transcript}" )\n')
seed += 1
return mocked_data
class TestCMUARCTIC(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_cmuarctic(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, utterance_id) in enumerate(dataset):
expected_sample = self.samples[i]
assert sample_rate == expected_sample[1]
assert transcript == expected_sample[2]
assert utterance_id == expected_sample[3]
self.assertEqual(expected_sample[0], waveform, atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.samples)
def test_cmuarctic_str(self):
dataset = cmuarctic.CMUARCTIC(self.root_dir)
self._test_cmuarctic(dataset)
def test_cmuarctic_path(self):
dataset = cmuarctic.CMUARCTIC(Path(self.root_dir))
self._test_cmuarctic(dataset)
|
from __future__ import annotations
from .model_card import SparseEncoderModelCardData
from .SparseEncoder import SparseEncoder
from .trainer import SparseEncoderTrainer
from .training_args import SparseEncoderTrainingArguments
__all__ = [
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
]
|
from __future__ import annotations
from .model_card import SparseEncoderModelCardData
from .SparseEncoder import SparseEncoder
from .trainer import SparseEncoderTrainer
from .training_args import SparseEncoderTrainingArguments
__all__ = [
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
|
"""Script to check if python modules can be imported."""
import random
import string
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
module_name = "".join(
random.choice(string.ascii_letters)
for _ in range(20) # noqa: S311
)
SourceFileLoader(module_name, file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
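# Hedged example invocation (the file names are hypothetical): pass one or more
# Python files on the command line; a traceback is printed for every file that
# fails to import, and the exit code is non-zero if any failure occurred.
#
#     python check_imports.py pkg/module_a.py pkg/module_b.py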
|
import random
import string
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
module_name = "".join(
random.choice(string.ascii_letters)
for _ in range(20) # noqa: S311
)
SourceFileLoader(module_name, file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
import importlib.util
import warnings
from functools import wraps
from typing import Optional
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
        version (str or int): The version when the object will be removed.
        remove (bool): If ``True``, also state that the object will be removed
            in the given (or a future) release.
    """
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
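if __name__ == "__main__":
    # Hedged demo (illustrative only): decorate a throwaway function with a module
    # name that is very unlikely to be installed, then show the resulting error.
    @requires_module("nonexistent_module_for_demo")
    def needs_missing_module():
        pass

    try:
        needs_missing_module()
    except RuntimeError as err:
        print(err)  # "... requires module: nonexistent_module_for_demo"

    @deprecated(direction="Use the new API instead.", version="9.9", remove=True)
    def old_fn():
        return 42

    print(old_fn())  # emits a deprecation warning, then prints 42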
|
import importlib.util
import warnings
from functools import wraps
from typing import Optional
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = (
f"{func.__module__}.{func.__name__} has been deprecated "
f'and will be removed from {"future" if version is None else version} release. '
f"{direction}"
)
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences as pad_sequences
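# Hedged usage sketch of the re-exported ``pad_sequences``: ragged integer sequences
# are left-padded with zeros into a rectangular array by default, e.g.
#
#     pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
#     # -> array([[0, 1, 2, 3],
#     #           [0, 0, 4, 5]], dtype=int32)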
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the
    image into grids and encodes bboxes (x1, y1, x2, y2) into (cx, cy, dw, dh).
    cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
    bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps: float = 1e-6, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def encode(self, bboxes: Union[Tensor, BaseBoxes],
gt_bboxes: Union[Tensor, BaseBoxes],
stride: Union[Tensor, int]) -> Tensor:
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., anchors.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes: Union[Tensor, BaseBoxes], pred_bboxes: Tensor,
stride: Union[Tensor, int]) -> Union[Tensor, BaseBoxes]:
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes,
e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
if self.use_box_type:
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
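if __name__ == '__main__':
    # Hedged round-trip sketch (numbers are illustrative): encoding anchors against
    # ground-truth boxes and decoding the result should approximately recover the
    # ground-truth boxes, as long as the center offsets stay within one stride.
    coder = YOLOBBoxCoder(eps=1e-6)
    anchors = torch.tensor([[0., 0., 32., 32.]])
    gt = torch.tensor([[4., 2., 28., 30.]])
    stride = 32
    deltas = coder.encode(anchors, gt, stride)
    print(coder.decode(anchors, deltas, stride))  # ~ tensor([[ 4.,  2., 28., 30.]])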
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the
    image into grids and encodes bboxes (x1, y1, x2, y2) into (cx, cy, dw, dh).
    cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
    bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., anchors.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes,
e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
if self.use_box_type:
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/advanced_tutorials/registry.html.
"""
from .build_functions import (build_model_from_cfg, build_runner_from_cfg,
build_scheduler_from_cfg)
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', build_func=build_scheduler_from_cfg)
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processor
LOG_PROCESSORS = Registry('log_processor')
# manage inferencer
INFERENCERS = Registry('inferencer')
# manage function
FUNCTIONS = Registry('function')
|
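# --- Illustrative sketch (not from the original files) ---
# A minimal example of how a root registry is typically used: register a module with
# the `register_module` decorator, then build it from a config dict. `ToyModel` and
# its sizes are made-up; mmengine and torch are assumed to be installed.
import torch
import torch.nn as nn
from mmengine.registry import MODELS

@MODELS.register_module()
class ToyModel(nn.Module):
    def __init__(self, num_classes: int = 10):
        super().__init__()
        self.fc = nn.Linear(8, num_classes)

    def forward(self, x):
        return self.fc(x)

# `type` selects the registered class; the remaining keys become constructor kwargs.
model = MODELS.build(dict(type='ToyModel', num_classes=5))
print(model(torch.randn(2, 8)).shape)  # torch.Size([2, 5])
|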
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/advanced_tutorials/registry.html.
"""
from .build_functions import (build_model_from_cfg, build_runner_from_cfg,
build_scheduler_from_cfg)
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', build_func=build_scheduler_from_cfg)
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processor
LOG_PROCESSORS = Registry('log_processor')
# manage inferencer
INFERENCERS = Registry('inferencer')
|
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
STUB_MODEL_NAME = "placeholder_model"
@pytest.fixture(name="hf_inference_api")
def fixture_hf_inference_api() -> HuggingFaceInferenceAPI:
with patch.dict("sys.modules", huggingface_hub=MagicMock()):
return HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
class TestHuggingFaceInferenceAPI:
def test_class_name(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
assert HuggingFaceInferenceAPI.class_name() == HuggingFaceInferenceAPI.__name__
assert hf_inference_api.class_name() == HuggingFaceInferenceAPI.__name__
def test_instantiation(self) -> None:
mock_hub = MagicMock()
with patch.dict("sys.modules", huggingface_hub=mock_hub):
llm = HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
assert llm.model_name == STUB_MODEL_NAME
# Check that it can be both a large language model and an embedding model
assert isinstance(llm, HuggingFaceInferenceAPI)
# Confirm Clients are instantiated correctly
# mock_hub.InferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
# mock_hub.AsyncInferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
def test_chat(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
messages = [
ChatMessage(content="Which movie is the best?"),
ChatMessage(content="It's Die Hard for sure.", role=MessageRole.ASSISTANT),
ChatMessage(content="Can you explain why?"),
]
generated_response = (
" It's based on the book of the same name by James Fenimore Cooper."
)
conversational_return = {
"choices": [
{
"message": {
"content": generated_response,
}
}
]
}
with patch.object(
hf_inference_api._sync_client,
"chat_completion",
return_value=conversational_return,
) as mock_conversational:
response = hf_inference_api.chat(messages=messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == generated_response
mock_conversational.assert_called_once_with(
messages=[{"role": m.role.value, "content": m.content} for m in messages],
model=STUB_MODEL_NAME,
)
def test_chat_text_generation(
self, hf_inference_api: HuggingFaceInferenceAPI
) -> None:
mock_message_to_prompt = MagicMock(
return_value="System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:"
)
hf_inference_api.task = "text-generation"
hf_inference_api.messages_to_prompt = mock_message_to_prompt
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are an expert movie reviewer"
),
ChatMessage(role=MessageRole.USER, content="Which movie is the best?"),
]
conversational_return = "It's Die Hard for sure."
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=conversational_return,
) as mock_complete:
response = hf_inference_api.chat(messages=messages)
hf_inference_api.messages_to_prompt.assert_called_once_with(messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == conversational_return
mock_complete.assert_called_once_with(
"System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:",
max_new_tokens=256,
)
def test_complete(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
prompt = "My favorite color is "
generated_text = '"green" and I love to paint. I have been painting for 30 years and have been'
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=generated_text,
) as mock_text_generation:
response = hf_inference_api.complete(prompt)
mock_text_generation.assert_called_once_with(prompt, max_new_tokens=256)
assert response.text == generated_text
|
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
STUB_MODEL_NAME = "placeholder_model"
@pytest.fixture(name="hf_inference_api")
def fixture_hf_inference_api() -> HuggingFaceInferenceAPI:
with patch.dict("sys.modules", huggingface_hub=MagicMock()):
return HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
class TestHuggingFaceInferenceAPI:
def test_class_name(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
assert HuggingFaceInferenceAPI.class_name() == HuggingFaceInferenceAPI.__name__
assert hf_inference_api.class_name() == HuggingFaceInferenceAPI.__name__
def test_instantiation(self) -> None:
mock_hub = MagicMock()
with patch.dict("sys.modules", huggingface_hub=mock_hub):
llm = HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
assert llm.model_name == STUB_MODEL_NAME
# Check that it can be both a large language model and an embedding model
assert isinstance(llm, HuggingFaceInferenceAPI)
# Confirm Clients are instantiated correctly
# mock_hub.InferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
# mock_hub.AsyncInferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
def test_chat(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
messages = [
ChatMessage(content="Which movie is the best?"),
ChatMessage(content="It's Die Hard for sure.", role=MessageRole.ASSISTANT),
ChatMessage(content="Can you explain why?"),
]
generated_response = (
" It's based on the book of the same name by James Fenimore Cooper."
)
conversational_return = {
"generated_text": generated_response,
"conversation": {
"generated_responses": ["It's Die Hard for sure.", generated_response],
"past_user_inputs": [
"Which movie is the best?",
"Can you explain why?",
],
},
}
with patch.object(
hf_inference_api._sync_client,
"conversational",
return_value=conversational_return,
) as mock_conversational:
response = hf_inference_api.chat(messages=messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == generated_response
mock_conversational.assert_called_once_with(
text="Can you explain why?",
past_user_inputs=["Which movie is the best?"],
generated_responses=["It's Die Hard for sure."],
)
def test_chat_text_generation(
self, hf_inference_api: HuggingFaceInferenceAPI
) -> None:
mock_message_to_prompt = MagicMock(
return_value="System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:"
)
hf_inference_api.task = "text-generation"
hf_inference_api.messages_to_prompt = mock_message_to_prompt
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are an expert movie reviewer"
),
ChatMessage(role=MessageRole.USER, content="Which movie is the best?"),
]
conversational_return = "It's Die Hard for sure."
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=conversational_return,
) as mock_complete:
response = hf_inference_api.chat(messages=messages)
hf_inference_api.messages_to_prompt.assert_called_once_with(messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == conversational_return
mock_complete.assert_called_once_with(
"System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:",
max_new_tokens=256,
)
def test_complete(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
prompt = "My favorite color is "
generated_text = '"green" and I love to paint. I have been painting for 30 years and have been'
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=generated_text,
) as mock_text_generation:
response = hf_inference_api.complete(prompt)
mock_text_generation.assert_called_once_with(prompt, max_new_tokens=256)
assert response.text == generated_text
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSORS, LOOPS,
METRICS, MODEL_WRAPPERS, MODELS, OPTIM_WRAPPER_CONSTRUCTORS,
OPTIM_WRAPPERS, OPTIMIZERS, PARAM_SCHEDULERS,
RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS,
VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'DefaultScope',
'traverse_registry_tree', 'count_registered_modules'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSORS, LOOPS,
METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'LOG_PROCESSORS', 'DefaultScope', 'traverse_registry_tree',
'count_registered_modules'
]
|
_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp_optimizer_wrapper import AmpOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'OptimWrapperDict'
]
|
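# --- Illustrative sketch (not from the original files) ---
# A minimal example of building an OptimWrapper from a config dict via
# `build_optim_wrapper`; the toy model and hyperparameters are made-up,
# and mmengine plus torch are assumed to be installed.
import torch.nn as nn
from mmengine.optim import build_optim_wrapper

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
optim_wrapper = build_optim_wrapper(
    model,
    dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01, momentum=0.9)))
print(type(optim_wrapper).__name__)  # OptimWrapper
|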
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, build_optimizer
from .default_constructor import DefaultOptimizerConstructor
__all__ = [
'OPTIMIZER_CONSTRUCTORS', 'OPTIMIZERS', 'DefaultOptimizerConstructor',
'build_optimizer'
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005))
|
import json
from pathlib import Path
import yaml
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.openapi import OpenAPIToolSpec
def test_class():
names_of_base_classes = [b.__name__ for b in OpenAPIToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
def test_opid_filter():
openapi_spec = load_example_spec()
llamaindex_tool_spec = OpenAPIToolSpec(
spec=openapi_spec, operation_id_filter=lambda it: it != "findPetsByTags"
)
spec_array = llamaindex_tool_spec.load_openapi_spec()
deserialized = json.loads(spec_array[0].text)
endpoints: list = deserialized["endpoints"]
operation = next(
filter(lambda it: it["path_template"] == "/pet/findByTags", endpoints), None
)
assert operation is None
def test_request_body():
openapi_spec = load_example_spec()
llamaindex_tool_spec = OpenAPIToolSpec(spec=openapi_spec)
spec_array = llamaindex_tool_spec.load_openapi_spec()
deserialized = json.loads(spec_array[0].text)
endpoints: list = deserialized["endpoints"]
operation = next(
filter(
lambda it: it["path_template"] == "/pet" and it["verb"] == "PUT", endpoints
)
)
assert isinstance(operation["requestBody"], dict)
def load_example_spec():
current_file_path = Path(__file__).resolve()
example_file = current_file_path.parent / "example.json"
with example_file.open("r", encoding="utf-8") as file:
return yaml.safe_load(file)
|
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.openapi import OpenAPIToolSpec
def test_class():
names_of_base_classes = [b.__name__ for b in OpenAPIToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
|
from langchain_core.utils.html import (
DEFAULT_LINK_REGEX,
PREFIXES_TO_IGNORE,
PREFIXES_TO_IGNORE_REGEX,
SUFFIXES_TO_IGNORE,
SUFFIXES_TO_IGNORE_REGEX,
extract_sub_links,
find_all_links,
)
__all__ = [
"DEFAULT_LINK_REGEX",
"PREFIXES_TO_IGNORE",
"PREFIXES_TO_IGNORE_REGEX",
"SUFFIXES_TO_IGNORE",
"SUFFIXES_TO_IGNORE_REGEX",
"extract_sub_links",
"find_all_links",
]
|
from langchain_core.utils.html import (
DEFAULT_LINK_REGEX,
PREFIXES_TO_IGNORE,
PREFIXES_TO_IGNORE_REGEX,
SUFFIXES_TO_IGNORE,
SUFFIXES_TO_IGNORE_REGEX,
extract_sub_links,
find_all_links,
)
__all__ = [
"PREFIXES_TO_IGNORE",
"SUFFIXES_TO_IGNORE",
"SUFFIXES_TO_IGNORE_REGEX",
"PREFIXES_TO_IGNORE_REGEX",
"DEFAULT_LINK_REGEX",
"find_all_links",
"extract_sub_links",
]
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
data_preprocessor = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
data_preprocessor=data_preprocessor,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_execution_status_batch,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(
user_id: str, cost: int, metadata: UsageTransactionMetadata
) -> int:
return await _user_credit_model.spend_credits(user_id, cost, metadata)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_execution_status_batch = exposed_run_and_wait(update_execution_status_batch)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_execution_status_batch,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_execution_status_batch = exposed_run_and_wait(update_execution_status_batch)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, Flow
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
resp = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = resp[0].docs
assert len(docs) == 5
for doc in docs:
assert doc.embedding.shape == (768,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
resp = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = resp[0].docs
assert len(docs) == 5
for doc in docs:
assert doc.embedding.shape == (768,)
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
"""
MangaDex info reader.
Retrieves data about a particular manga by title.
"""
import logging
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class MangaDexReader(BaseReader):
def __init__(self) -> None:
self.base_url = "https://api.mangadex.org"
def _get_manga_info(self, title: str):
try:
manga_response = requests.get(
f"{self.base_url}/manga", params={"title": title}
)
manga_response.raise_for_status()
manga_data = manga_response.json()["data"]
if len(manga_data):
return manga_data[0]
else:
logger.warning(f"No match found for title '{title}'")
return None
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error: {http_error}")
except requests.exceptions.RequestException as req_error:
logger.error(f"Request Error: {req_error}")
return None
# Authors and artists are combined
def _get_manga_author(self, id: str):
try:
author_response = requests.get(
f"{self.base_url}/author", params={"ids[]": [id]}
)
author_response.raise_for_status()
return author_response.json()["data"][0]
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error: {http_error}")
except requests.exceptions.RequestException as req_error:
logger.error(f"Request Error: {req_error}")
return None
def _get_manga_chapters(self, manga_id: str, lang: str):
try:
chapter_response = requests.get(
f"{self.base_url}/manga/{manga_id}/feed",
params={
"translatedLanguage[]": [lang],
"order[chapter]": "asc",
},
)
chapter_response.raise_for_status()
return chapter_response.json()
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error: {http_error}")
except requests.exceptions.RequestException as req_error:
logger.error(f"Request Error: {req_error}")
return None
def load_data(self, titles: List[str], lang: str = "en") -> List[Document]:
"""
Load data from the MangaDex API.
Args:
titles (List[str]): List of manga titles.
lang (str, optional): ISO 639-1 language code. Defaults to 'en'.
Returns:
List[Document]: A list of Documents.
"""
result = []
for title in titles:
manga = self._get_manga_info(title)
if not manga:
continue
author_name, artist_name = None, None
for r in manga["relationships"]:
if r["type"] == "author":
author = self._get_manga_author(r["id"])
author_name = author["attributes"]["name"]
if r["type"] == "artist":
artist = self._get_manga_author(r["id"])
artist_name = artist["attributes"]["name"]
chapters = self._get_manga_chapters(manga["id"], lang)
chapter_count = chapters.get("total", None)
latest_chapter_published_at = None
if len(chapters["data"]):
latest_chapter = chapters["data"][-1]
latest_chapter_published_at = latest_chapter["attributes"]["publishAt"]
# Get tags for the selected language
tags = []
for tag in manga["attributes"]["tags"]:
tag_name_dict = tag["attributes"]["name"]
if lang in tag_name_dict:
tags.append(tag_name_dict[lang])
doc = Document(
text=manga["attributes"]["title"].get(lang, title),
extra_info={
"id": manga["id"],
"author": author_name,
"artist": artist_name,
"description": manga["attributes"]["description"].get(lang, None),
"original_language": manga["attributes"]["originalLanguage"],
"tags": tags,
"chapter_count": chapter_count,
"latest_chapter_published_at": latest_chapter_published_at,
},
)
result.append(doc)
return result
if __name__ == "__main__":
reader = MangaDexReader()
print(reader.load_data(titles=["Grand Blue Dreaming"], lang="en"))
|
"""
MangaDex info reader.
Retrieves data about a particular manga by title.
"""
import logging
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class MangaDexReader(BaseReader):
def __init__(self) -> None:
self.base_url = "https://api.mangadex.org"
def _get_manga_info(self, title: str):
try:
manga_response = requests.get(
f"{self.base_url}/manga", params={"title": title}
)
manga_response.raise_for_status()
manga_data = manga_response.json()["data"]
if len(manga_data):
return manga_data[0]
else:
logger.warning(f"No match found for title '{title}'")
return None
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error: {http_error}")
except requests.exceptions.RequestException as req_error:
logger.error(f"Request Error: {req_error}")
return None
# Authors and artists are combined
def _get_manga_author(self, id: str):
try:
author_response = requests.get(
f"{self.base_url}/author", params={"ids[]": [id]}
)
author_response.raise_for_status()
return author_response.json()["data"][0]
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error: {http_error}")
except requests.exceptions.RequestException as req_error:
logger.error(f"Request Error: {req_error}")
return None
def _get_manga_chapters(self, manga_id: str, lang: str):
try:
chapter_response = requests.get(
f"{self.base_url}/manga/{manga_id}/feed",
params={
"translatedLanguage[]": [lang],
"order[chapter]": "asc",
},
)
chapter_response.raise_for_status()
return chapter_response.json()
except requests.exceptions.HTTPError as http_error:
logger.error(f"HTTP error: {http_error}")
except requests.exceptions.RequestException as req_error:
logger.error(f"Request Error: {req_error}")
return None
def load_data(self, titles: List[str], lang: str = "en") -> List[Document]:
"""Load data from the MangaDex API.
Args:
titles (List[str]): List of manga titles.
lang (str, optional): ISO 639-1 language code. Defaults to 'en'.
Returns:
List[Document]: A list of Documents.
"""
result = []
for title in titles:
manga = self._get_manga_info(title)
if not manga:
continue
author_name, artist_name = None, None
for r in manga["relationships"]:
if r["type"] == "author":
author = self._get_manga_author(r["id"])
author_name = author["attributes"]["name"]
if r["type"] == "artist":
artist = self._get_manga_author(r["id"])
artist_name = artist["attributes"]["name"]
chapters = self._get_manga_chapters(manga["id"], lang)
chapter_count = chapters.get("total", None)
latest_chapter_published_at = None
if len(chapters["data"]):
latest_chapter = chapters["data"][-1]
latest_chapter_published_at = latest_chapter["attributes"]["publishAt"]
# Get tags for the selected language
tags = []
for tag in manga["attributes"]["tags"]:
tag_name_dict = tag["attributes"]["name"]
if lang in tag_name_dict:
tags.append(tag_name_dict[lang])
doc = Document(
text=manga["attributes"]["title"].get(lang, title),
extra_info={
"id": manga["id"],
"author": author_name,
"artist": artist_name,
"description": manga["attributes"]["description"].get(lang, None),
"original_language": manga["attributes"]["originalLanguage"],
"tags": tags,
"chapter_count": chapter_count,
"latest_chapter_published_at": latest_chapter_published_at,
},
)
result.append(doc)
return result
if __name__ == "__main__":
reader = MangaDexReader()
print(reader.load_data(titles=["Grand Blue Dreaming"], lang="en"))
|
"""Elasticsearch (or Opensearch) reader over REST api.
This only uses the basic search api, so it will work with Elasticsearch and Opensearch.
"""
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class ElasticsearchReader(BasePydanticReader):
"""
Read documents from an Elasticsearch/Opensearch index.
These documents can then be used in a downstream Llama Index data structure.
Args:
endpoint (str): URL (http/https) of cluster
index (str): Name of the index (required)
httpx_client_args (dict): Optional additional args to pass to the `httpx.Client`
"""
is_remote: bool = True
endpoint: str
index: str
httpx_client_args: Optional[dict] = None
_client: Any = PrivateAttr()
def __init__(
self, endpoint: str, index: str, httpx_client_args: Optional[dict] = None
):
"""Initialize with parameters."""
super().__init__(
endpoint=endpoint, index=index, httpx_client_args=httpx_client_args
)
import_err_msg = """
`httpx` package not found. Install via `pip install httpx`
"""
try:
import httpx
except ImportError:
raise ImportError(import_err_msg)
self._client = httpx.Client(base_url=endpoint, **(httpx_client_args or {}))
@classmethod
def class_name(cls) -> str:
return "ElasticsearchReader"
def load_data(
self,
field: str,
query: Optional[dict] = None,
embedding_field: Optional[str] = None,
metadata_fields: Optional[List[str]] = None,
) -> List[Document]:
"""Read data from the Elasticsearch index.
Args:
field (str): Field in the document to retrieve text from
query (Optional[dict]): Elasticsearch JSON query DSL object.
For example:
{"query": {"match": {"message": {"query": "this is a test"}}}}
embedding_field (Optional[str]): If there are embeddings stored in
this index, this field can be used
to set the embedding field on the returned Document list.
metadata_fields (Optional[List[str]]): Fields used as metadata. Default
is all fields in the document except those specified by the
field and embedding_field parameters.
Returns:
List[Document]: A list of documents.
"""
res = self._client.post(f"{self.index}/_search", json=query).json()
documents = []
for hit in res["hits"]["hits"]:
doc_id = hit["_id"]
value = hit["_source"][field]
embedding = hit["_source"].get(embedding_field or "", None)
if metadata_fields:
metadata = {
k: v for k, v in hit["_source"].items() if k in metadata_fields
}
else:
hit["_source"].pop(field)
hit["_source"].pop(embedding_field or "", None)
metadata = hit["_source"]
documents.append(
Document(id_=doc_id, text=value, metadata=metadata, embedding=embedding)
)
return documents
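# --- Illustrative usage sketch (not part of the original module) ---
# The endpoint, index, field names and query below are made-up examples; they show how
# the reader could be pointed at a cluster and called with the match query from the
# docstring, plus an explicit metadata_fields selection.
if __name__ == "__main__":
    reader = ElasticsearchReader(endpoint="http://localhost:9200", index="messages")
    docs = reader.load_data(
        field="message",
        query={"query": {"match": {"message": {"query": "this is a test"}}}},
        metadata_fields=["timestamp", "user"],
    )
    print(f"Loaded {len(docs)} documents")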
|
"""Elasticsearch (or Opensearch) reader over REST api.
This only uses the basic search api, so it will work with Elasticsearch and Opensearch.
"""
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class ElasticsearchReader(BasePydanticReader):
"""
Read documents from an Elasticsearch/Opensearch index.
These documents can then be used in a downstream Llama Index data structure.
Args:
endpoint (str): URL (http/https) of cluster
index (str): Name of the index (required)
httpx_client_args (dict): Optional additional args to pass to the `httpx.Client`
"""
is_remote: bool = True
endpoint: str
index: str
httpx_client_args: Optional[dict] = None
_client: Any = PrivateAttr()
def __init__(
self, endpoint: str, index: str, httpx_client_args: Optional[dict] = None
):
"""Initialize with parameters."""
super().__init__(
endpoint=endpoint, index=index, httpx_client_args=httpx_client_args
)
import_err_msg = """
`httpx` package not found. Install via `pip install httpx`
"""
try:
import httpx
except ImportError:
raise ImportError(import_err_msg)
self._client = httpx.Client(base_url=endpoint, **(httpx_client_args or {}))
@classmethod
def class_name(cls) -> str:
return "ElasticsearchReader"
def load_data(
self,
field: str,
query: Optional[dict] = None,
embedding_field: Optional[str] = None,
) -> List[Document]:
"""Read data from the Elasticsearch index.
Args:
field (str): Field in the document to retrieve text from
query (Optional[dict]): Elasticsearch JSON query DSL object.
For example:
{"query": {"match": {"message": {"query": "this is a test"}}}}
embedding_field (Optional[str]): If there are embeddings stored in
this index, this field can be used
to set the embedding field on the returned Document list.
Returns:
List[Document]: A list of documents.
"""
res = self._client.post(f"{self.index}/_search", json=query).json()
documents = []
for hit in res["hits"]["hits"]:
doc_id = hit["_id"]
value = hit["_source"][field]
embedding = hit["_source"].get(embedding_field or "", None)
documents.append(
Document(
id_=doc_id, text=value, metadata=hit["_source"], embedding=embedding
)
)
return documents
|
"""Init file."""
from llama_index.readers.mangadex.base import MangaDexReader
__all__ = ["MangaDexReader"]
|
"""Init file."""
from llama_index.readers.mangadex.base import MangaDexReader
__all__ = ["MangaDexReader"]
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderKLTemporalDecoder
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTemporalDecoderTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderKLTemporalDecoder
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 3
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
num_frames = 3
return {"sample": image, "num_frames": num_frames}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"latent_channels": 4,
"layers_per_block": 2,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Encoder", "TemporalDecoder", "UNetMidBlock2D"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("Test unsupported.")
def test_forward_with_norm_groups(self):
pass
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderKLTemporalDecoder
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTemporalDecoderTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderKLTemporalDecoder
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 3
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
num_frames = 3
return {"sample": image, "num_frames": num_frames}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"latent_channels": 4,
"layers_per_block": 2,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Encoder", "TemporalDecoder", "UNetMidBlock2D"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("Test unsupported.")
def test_forward_with_norm_groups(self):
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import build_assigner, build_sampler
def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):
"""Create sample results that can be passed to BBoxHead.get_targets."""
num_imgs = 1
feat = torch.rand(1, 1, 3, 3)
assign_config = dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1)
sampler_config = dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True)
bbox_assigner = build_assigner(assign_config)
bbox_sampler = build_sampler(sampler_config)
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
gt_bboxes_ignore[i], gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=feat)
sampling_results.append(sampling_result)
return sampling_results
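# --- Illustrative usage sketch (not part of the original module) ---
# Box counts and sizes below are made-up; the helper is simply called with a list of
# random but valid (x1 <= x2, y1 <= y2) proposals, ground-truth boxes and labels.
if __name__ == '__main__':
    xy = torch.rand(10, 2) * 16
    wh = torch.rand(10, 2) * 16
    proposal_list = [torch.cat([xy, xy + wh], dim=-1)]
    gt_xy = torch.rand(2, 2) * 16
    gt_wh = torch.rand(2, 2) * 16
    gt_bboxes = [torch.cat([gt_xy, gt_xy + gt_wh], dim=-1)]
    gt_labels = [torch.randint(0, 80, (2,))]
    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels)
    print(len(sampling_results))  # 1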
|
import torch
from mmdet.core import build_assigner, build_sampler
def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):
"""Create sample results that can be passed to BBoxHead.get_targets."""
num_imgs = 1
feat = torch.rand(1, 1, 3, 3)
assign_config = dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1)
sampler_config = dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True)
bbox_assigner = build_assigner(assign_config)
bbox_sampler = build_sampler(sampler_config)
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
gt_bboxes_ignore[i], gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=feat)
sampling_results.append(sampling_result)
return sampling_results
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLOv2',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOV2Head',
num_classes=80,
in_channels=256,
feat_channels=512,
stacked_convs=4,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
mask_feature_head=dict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01), clip_grad=dict(max_norm=35, norm_type=2))
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLOv2',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOV2Head',
num_classes=80,
in_channels=256,
feat_channels=512,
stacked_convs=4,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
mask_feature_head=dict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
from .base import BaseStrategy
from .deepspeed import DeepSpeedStrategy
from .distributed import DDPStrategy
from .single_device import SingleDeviceStrategy
__all__ = [
'BaseStrategy', 'DDPStrategy', 'SingleDeviceStrategy', 'DeepSpeedStrategy'
]
if digit_version(TORCH_VERSION) >= digit_version('2.0.0'):
try:
from .fsdp import FSDPStrategy # noqa:F401
__all__.append('FSDPStrategy')
except: # noqa: E722
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import digit_version, is_installed
from mmengine.utils.dl_utils import TORCH_VERSION
from .base import BaseStrategy
from .distributed import DDPStrategy
from .single_device import SingleDeviceStrategy
__all__ = ['BaseStrategy', 'DDPStrategy', 'SingleDeviceStrategy']
if is_installed('deepspeed'):
from .deepspeed import DeepSpeedStrategy # noqa: F401
__all__.append('DeepSpeedStrategy')
if digit_version(TORCH_VERSION) >= digit_version('2.0.0'):
try:
from .fsdp import FSDPStrategy # noqa:F401
__all__.append('FSDPStrategy')
except: # noqa: E722
pass
|
"""Schema for Blobs and Blob Loaders.
The goal is to facilitate decoupling of content loading from content parsing code.
In addition, content loading code should provide a lazy loading interface by default.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
# Re-export Blob and PathLike for backwards compatibility
from langchain_core.documents.base import Blob as Blob
from langchain_core.documents.base import PathLike as PathLike
if TYPE_CHECKING:
from collections.abc import Iterable
class BlobLoader(ABC):
"""Abstract interface for blob loaders implementation.
Implementer should be able to load raw content from a storage system according
to some criteria and return the raw content lazily as a stream of blobs.
"""
@abstractmethod
def yield_blobs(
self,
) -> Iterable[Blob]:
"""A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs
"""
# Re-export Blob and Pathlike for backwards compatibility
__all__ = ["Blob", "BlobLoader", "PathLike"]
|
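# --- Illustrative sketch (not from the original files) ---
# A minimal concrete BlobLoader that lazily yields blobs from in-memory strings,
# showing the shape of the interface above. `InMemoryBlobLoader` is a made-up
# example class, not part of langchain_core.
from collections.abc import Iterable

from langchain_core.document_loaders import Blob, BlobLoader

class InMemoryBlobLoader(BlobLoader):
    def __init__(self, texts: list[str]) -> None:
        self.texts = texts

    def yield_blobs(self) -> Iterable[Blob]:
        # Yield one blob per string, lazily, as the interface asks for.
        for text in self.texts:
            yield Blob.from_data(text.encode("utf-8"))

for blob in InMemoryBlobLoader(["hello", "world"]).yield_blobs():
    print(blob.as_string())
|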
"""Schema for Blobs and Blob Loaders.
The goal is to facilitate decoupling of content loading from content parsing code.
In addition, content loading code should provide a lazy loading interface by default.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Iterable
# Re-export Blob and PathLike for backwards compatibility
from langchain_core.documents.base import Blob as Blob
from langchain_core.documents.base import PathLike as PathLike
class BlobLoader(ABC):
"""Abstract interface for blob loaders implementation.
Implementer should be able to load raw content from a storage system according
to some criteria and return the raw content lazily as a stream of blobs.
"""
@abstractmethod
def yield_blobs(
self,
) -> Iterable[Blob]:
"""A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs
"""
# Re-export Blob and Pathlike for backwards compatibility
__all__ = ["Blob", "BlobLoader", "PathLike"]
|
import hashlib
import secrets
from typing import NamedTuple
class APIKeyContainer(NamedTuple):
"""Container for API key parts."""
raw: str
prefix: str
postfix: str
hash: str
class APIKeyManager:
PREFIX: str = "agpt_"
PREFIX_LENGTH: int = 8
POSTFIX_LENGTH: int = 8
def generate_api_key(self) -> APIKeyContainer:
"""Generate a new API key with all its parts."""
raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
return APIKeyContainer(
raw=raw_key,
prefix=raw_key[: self.PREFIX_LENGTH],
postfix=raw_key[-self.POSTFIX_LENGTH :],
hash=hashlib.sha256(raw_key.encode()).hexdigest(),
)
def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
return secrets.compare_digest(provided_hash, stored_hash)
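# --- Illustrative usage sketch (not part of the original module) ---
# Generate a key, keep only its hash (plus prefix/postfix for display) server-side,
# and later verify a presented key against the stored hash.
if __name__ == "__main__":
    manager = APIKeyManager()
    key = manager.generate_api_key()
    assert manager.verify_api_key(key.raw, key.hash)
    assert not manager.verify_api_key("agpt_not_a_real_key", key.hash)
    print(f"prefix={key.prefix} postfix={key.postfix}")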
|
import hashlib
import secrets
from typing import NamedTuple
class APIKeyContainer(NamedTuple):
"""Container for API key parts."""
raw: str
prefix: str
postfix: str
hash: str
class APIKeyManager:
PREFIX: str = "agpt_"
PREFIX_LENGTH: int = 8
POSTFIX_LENGTH: int = 8
def generate_api_key(self) -> APIKeyContainer:
"""Generate a new API key with all its parts."""
raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
return APIKeyContainer(
raw=raw_key,
prefix=raw_key[: self.PREFIX_LENGTH],
postfix=raw_key[-self.POSTFIX_LENGTH :],
hash=hashlib.sha256(raw_key.encode()).hexdigest(),
)
def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
create_dct,
DB_to_amplitude,
detect_pitch_frequency,
edit_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dither,
dcshift,
deemph_biquad,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
amplitude_to_DB,
compute_deltas,
compute_kaldi_pitch,
create_dct,
melscale_fbanks,
linear_fbanks,
DB_to_amplitude,
detect_pitch_frequency,
inverse_spectrogram,
griffinlim,
mask_along_axis,
mask_along_axis_iid,
mu_law_encoding,
mu_law_decoding,
phase_vocoder,
sliding_window_cmn,
spectrogram,
spectral_centroid,
apply_codec,
resample,
edit_distance,
pitch_shift,
rnnt_loss,
psd,
mvdr_weights_souden,
mvdr_weights_rtf,
rtf_evd,
rtf_power,
apply_beamforming,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
]
|
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 3
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed[0], top_k=top_k)
decoded_document = model.decode(document_embed[0])
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# ------------------------------------------example with v2 distill-----------------------------------------
Similarity: tensor([[17.5307]], device='cuda:0')
Top tokens 3 for each text:
Token: ny, Query score: 5.7729, Document score: 1.4109
Token: weather, Query score: 4.5684, Document score: 1.4673
Token: now, Query score: 3.5895, Document score: 0.7473
"""
print("# -----------------------------------------example with v3 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling(pooling_strategy="max", activation_function="log1p_relu"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed[0], top_k=top_k)
decoded_document = model.decode(document_embed[0])
for i in range(min(top_k, len(decoded_query))):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# -----------------------------------------example with v3 distill-----------------------------------------
Similarity: tensor([[11.1105]], device='cuda:0')
Top tokens 10 for each text:
Token: ny, Query score: 5.7729, Document score: 0.8049
Token: weather, Query score: 4.5684, Document score: 0.9710
Token: now, Query score: 3.5895, Document score: 0.4720
Token: ?, Query score: 3.3313, Document score: 0.0286
Token: what, Query score: 2.7699, Document score: 0.0787
Token: in, Query score: 0.4989, Document score: 0.0417
"""
|
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 3
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed)[0]
decoded_document = model.decode(document_embed[0], top_k=100)
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# ------------------------------------------example with v2 distill-----------------------------------------
Similarity: tensor([[17.5307]], device='cuda:0')
Top tokens 3 for each text:
Token: ny, Query score: 5.7729, Document score: 1.4109
Token: weather, Query score: 4.5684, Document score: 1.4673
Token: now, Query score: 3.5895, Document score: 0.7473
"""
print("# -----------------------------------------example with v3 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling(pooling_strategy="max", activation_function="log1p_relu"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed)[0]
decoded_document = model.decode(document_embed[0], top_k=100)
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# -----------------------------------------example with v3 distill-----------------------------------------
Similarity: tensor([[11.1105]], device='cuda:0')
Top tokens 10 for each text:
Token: ny, Query score: 5.7729, Document score: 0.8049
Token: weather, Query score: 4.5684, Document score: 0.9710
Token: now, Query score: 3.5895, Document score: 0.4720
Token: ?, Query score: 3.3313, Document score: 0.0286
Token: what, Query score: 2.7699, Document score: 0.0787
Token: in, Query score: 0.4989, Document score: 0.0417
"""
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing.tensor.embedding import Embedding, Tensor
class Text(BaseDocument):
"""
base Document for Text handling
"""
text: str = ''
tensor: Optional[Tensor]
embedding: Optional[Embedding]
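# --- Illustrative usage sketch, assumed API, not part of the original module ---
# The optional tensor/embedding fields default to None, so a Text document can
# be built from plain text alone; assigning a numpy array afterwards is an
# assumption about how the Embedding field accepts array-like values.
if __name__ == '__main__':
    import numpy as np

    doc = Text(text='hello world')
    doc.embedding = np.zeros(128)
    print(doc.text, doc.embedding.shape)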
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing.embedding import Embedding, Tensor
class Text(BaseDocument):
"""
base Document for Text handling
"""
text: str = ''
tensor: Optional[Tensor]
embedding: Optional[Embedding]
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class Flowers102(VisionDataset):
"""`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
between 40 and 258 images.
The images have large scale, pose and light variations. In addition, there are categories that
have large variations within the category, and several very similar categories.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a
transformed version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
_file_dict = { # filename, md5
"image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
"label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
"setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
}
_splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._base_folder = Path(self.root) / "flowers-102"
self._images_folder = self._base_folder / "jpg"
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
from scipy.io import loadmat
set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
image_ids = set_ids[self._splits_map[self._split]].tolist()
labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
self._labels = []
self._image_files = []
for image_id in image_ids:
self._labels.append(image_id_to_label[image_id])
self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_integrity(self):
if not (self._images_folder.exists() and self._images_folder.is_dir()):
return False
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
if not check_integrity(str(self._base_folder / filename), md5):
return False
return True
def download(self):
if self._check_integrity():
return
download_and_extract_archive(
f"{self._download_url_prefix}{self._file_dict['image'][0]}",
str(self._base_folder),
md5=self._file_dict["image"][1],
)
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
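# --- Illustrative usage sketch, not part of the torchvision source ---
# Typical end-to-end use: download a split, wrap it in a DataLoader and pull a
# batch. The `./data` root and the resize-to-224 transform are illustrative
# choices, not requirements of the dataset class.
if __name__ == "__main__":
    import torch
    from torchvision import transforms

    dataset = Flowers102(
        root="./data",
        split="val",
        transform=transforms.Compose(
            [transforms.Resize((224, 224)), transforms.ToTensor()]
        ),
        download=True,
    )
    loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
    images, labels = next(iter(loader))
    print(images.shape, labels[:5])  # e.g. torch.Size([8, 3, 224, 224])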
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class Flowers102(VisionDataset):
"""`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
between 40 and 258 images.
The images have large scale, pose and light variations. In addition, there are categories that
have large variations within the category, and several very similar categories.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a
transformed version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
_file_dict = { # filename, md5
"image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
"label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
"setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
}
_splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._base_folder = Path(self.root) / "flowers-102"
self._images_folder = self._base_folder / "jpg"
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
from scipy.io import loadmat
set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
image_ids = set_ids[self._splits_map[self._split]].tolist()
labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
self._labels = []
self._image_files = []
for image_id in image_ids:
self._labels.append(image_id_to_label[image_id])
self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_integrity(self):
if not (self._images_folder.exists() and self._images_folder.is_dir()):
return False
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
if not check_integrity(str(self._base_folder / filename), md5):
return False
return True
def download(self):
if self._check_integrity():
return
download_and_extract_archive(
f"{self._download_url_prefix}{self._file_dict['image'][0]}",
str(self._base_folder),
md5=self._file_dict["image"][1],
)
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
|
__version__ = '0.39.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.38.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
import os
from typing import Any
import numpy as np
import pytest
from scipy import sparse
from jina import Document, DocumentArray, Executor, Flow, requests
from tests import validate_callback
cur_dir = os.path.dirname(os.path.abspath(__file__))
TOP_K = 3
@pytest.fixture(scope='function')
def num_docs():
return 10
@pytest.fixture(scope='function')
def docs_to_index(num_docs):
docs = []
for idx in range(1, num_docs + 1):
doc = Document(id=str(idx), content=np.array([idx * 5]))
docs.append(doc)
return DocumentArray(docs)
class DummyCSRSparseIndexEncoder(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.docs = DocumentArray()
@requests(on='/index')
def encode(self, docs: DocumentArray, *args, **kwargs) -> Any:
for i, doc in enumerate(docs):
doc.embedding = sparse.coo_matrix(doc.content)
self.docs.extend(docs)
@requests(on='/search')
def query(self, docs: DocumentArray, parameters, *args, **kwargs):
top_k = int(parameters['top_k'])
for doc in docs:
doc.matches = self.docs[:top_k]
def test_sparse_pipeline(mocker, docs_to_index):
def validate(response):
assert len(response.docs) == 1
for doc in response.docs:
assert len(doc.matches) == TOP_K
for i, match in enumerate(doc.matches):
assert match.id == docs_to_index[i].id
assert isinstance(match.embedding, sparse.coo_matrix)
f = Flow().add(uses=DummyCSRSparseIndexEncoder)
mock = mocker.Mock()
error_mock = mocker.Mock()
with f:
f.post(
on='/index',
inputs=docs_to_index,
on_error=error_mock,
)
f.post(
on='/search',
inputs=docs_to_index[0],
parameters={'top_k': TOP_K},
on_done=mock,
on_error=error_mock,
)
mock.assert_called_once()
validate_callback(mock, validate)
error_mock.assert_not_called()
|
from typing import Any
import os
import pytest
import numpy as np
from scipy import sparse
from jina import Flow, Document, DocumentArray, requests, Executor
from tests import validate_callback
cur_dir = os.path.dirname(os.path.abspath(__file__))
TOP_K = 3
@pytest.fixture(scope='function')
def num_docs():
return 10
@pytest.fixture(scope='function')
def docs_to_index(num_docs):
docs = []
for idx in range(1, num_docs + 1):
doc = Document(id=str(idx), content=np.array([idx * 5]))
docs.append(doc)
return DocumentArray(docs)
class DummyCSRSparseIndexEncoder(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.docs = DocumentArray()
@requests(on='/index')
def encode(self, docs: 'DocumentArray', *args, **kwargs) -> Any:
for i, doc in enumerate(docs):
doc.embedding = sparse.coo_matrix(doc.content)
self.docs.extend(docs)
@requests(on='/search')
def query(self, docs: 'DocumentArray', parameters, *args, **kwargs):
top_k = int(parameters['top_k'])
for doc in docs:
doc.matches = self.docs[:top_k]
def test_sparse_pipeline(mocker, docs_to_index):
def validate(response):
assert len(response.docs) == 1
for doc in response.docs:
assert len(doc.matches) == TOP_K
for i, match in enumerate(doc.matches):
assert match.id == docs_to_index[i].id
assert isinstance(match.embedding, sparse.coo_matrix)
f = Flow().add(uses=DummyCSRSparseIndexEncoder)
mock = mocker.Mock()
error_mock = mocker.Mock()
with f:
f.post(
on='/index',
inputs=docs_to_index,
on_error=error_mock,
)
f.post(
on='/search',
inputs=docs_to_index[0],
parameters={'top_k': TOP_K},
on_done=mock,
on_error=error_mock,
)
mock.assert_called_once()
validate_callback(mock, validate)
error_mock.assert_not_called()
|
from typing import Any, ForwardRef, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
if (
(get_origin(x) in (list, tuple, dict, set))
or is_typevar(x)
or (type(x) == ForwardRef)
or is_typevar(x)
or x == ID
):
return False
return issubclass(x, a_tuple)
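# --- Illustrative sketch, not part of the original module ---
# Demonstrates why `safe_issubclass` exists: the builtin `issubclass` raises a
# TypeError for parametrized generics such as List[int], while the helper just
# returns False for such non-class inputs.
if __name__ == "__main__":
    from typing import List

    print(safe_issubclass(List[int], object))  # False, no exception
    try:
        issubclass(List[int], object)
    except TypeError as exc:
        print(f"builtin issubclass raised: {exc}")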
|
from typing import Any, ForwardRef, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
if (
(get_origin(x) in (list, tuple, dict, set))
or is_typevar(x)
or (type(x) == ForwardRef)
or is_typevar(x)
or x == ID
):
return False
return issubclass(x, a_tuple)
|
from typing import Optional, List
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from docarray.typing.bytes import ImageBytes
from docarray.typing.url import AnyUrl
from jina import Executor, requests
from pydantic import Field
class TextAndImageDoc(BaseDoc):
text: Optional[str] = None
url: Optional[AnyUrl] = None
bytes: Optional[ImageBytes] = None
num_tokens: Optional[int] = None
input_ids: Optional[List[int]] = None
class EmbeddingResponseModel(TextAndImageDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class SampleClipExecutor(Executor):
@requests(on="/encode")
def foo(
self, docs: DocList[TextAndImageDoc], **kwargs
) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
url=doc.url,
bytes=doc.bytes,
embeddings=np.random.random((1, 64)),
)
)
return DocList[EmbeddingResponseModel](ret)
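# --- Illustrative usage sketch, assuming the jina Deployment client API ---
# Serves the executor locally and sends one text document through it; the
# Deployment import and the `return_type` argument are assumptions about the
# surrounding jina version, not part of the original file.
if __name__ == "__main__":
    from jina import Deployment

    dep = Deployment(uses=SampleClipExecutor)
    with dep:
        docs = dep.post(
            on="/encode",
            inputs=DocList[TextAndImageDoc]([TextAndImageDoc(text="hello")]),
            return_type=DocList[EmbeddingResponseModel],
        )
    print(docs[0].embeddings.shape)  # (1, 64) random embedding from the sample executor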
|
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from docarray.typing.bytes import ImageBytes
from docarray.typing.url import AnyUrl
from jina import Executor, requests
from pydantic import Field
class TextAndImageDoc(BaseDoc):
text: Optional[str] = None
url: Optional[AnyUrl] = None
bytes: Optional[ImageBytes] = None
class EmbeddingResponseModel(TextAndImageDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class SampleClipExecutor(Executor):
@requests(on="/encode")
def foo(
self, docs: DocList[TextAndImageDoc], **kwargs
) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
url=doc.url,
bytes=doc.bytes,
embeddings=np.random.random((1, 64)),
)
)
return DocList[EmbeddingResponseModel](ret)
|
from langchain_core.prompts.prompt import PromptTemplate
_DEFAULT_TEMPLATE = """Question: Who lived longer, Muhammad Ali or Alan Turing?
Are follow up questions needed here: Yes.
Follow up: How old was Muhammad Ali when he died?
Intermediate answer: Muhammad Ali was 74 years old when he died.
Follow up: How old was Alan Turing when he died?
Intermediate answer: Alan Turing was 41 years old when he died.
So the final answer is: Muhammad Ali
Question: When was the founder of craigslist born?
Are follow up questions needed here: Yes.
Follow up: Who was the founder of craigslist?
Intermediate answer: Craigslist was founded by Craig Newmark.
Follow up: When was Craig Newmark born?
Intermediate answer: Craig Newmark was born on December 6, 1952.
So the final answer is: December 6, 1952
Question: Who was the maternal grandfather of George Washington?
Are follow up questions needed here: Yes.
Follow up: Who was the mother of George Washington?
Intermediate answer: The mother of George Washington was Mary Ball Washington.
Follow up: Who was the father of Mary Ball Washington?
Intermediate answer: The father of Mary Ball Washington was Joseph Ball.
So the final answer is: Joseph Ball
Question: Are both the directors of Jaws and Casino Royale from the same country?
Are follow up questions needed here: Yes.
Follow up: Who is the director of Jaws?
Intermediate answer: The director of Jaws is Steven Spielberg.
Follow up: Where is Steven Spielberg from?
Intermediate answer: The United States.
Follow up: Who is the director of Casino Royale?
Intermediate answer: The director of Casino Royale is Martin Campbell.
Follow up: Where is Martin Campbell from?
Intermediate answer: New Zealand.
So the final answer is: No
Question: {input}
Are followup questions needed here:{agent_scratchpad}"""
PROMPT = PromptTemplate(
input_variables=["input", "agent_scratchpad"], template=_DEFAULT_TEMPLATE
)
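# --- Illustrative usage sketch, not part of the original module ---
# The template takes exactly two variables; `format` fills them in while the
# few-shot examples stay untouched. The question below is arbitrary.
if __name__ == "__main__":
    rendered = PROMPT.format(
        input="Who lived longer, Ada Lovelace or Grace Hopper?",
        agent_scratchpad="",
    )
    print(rendered[-200:])  # tail of the rendered prompt, ending with the new question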
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_DEFAULT_TEMPLATE = """Question: Who lived longer, Muhammad Ali or Alan Turing?
Are follow up questions needed here: Yes.
Follow up: How old was Muhammad Ali when he died?
Intermediate answer: Muhammad Ali was 74 years old when he died.
Follow up: How old was Alan Turing when he died?
Intermediate answer: Alan Turing was 41 years old when he died.
So the final answer is: Muhammad Ali
Question: When was the founder of craigslist born?
Are follow up questions needed here: Yes.
Follow up: Who was the founder of craigslist?
Intermediate answer: Craigslist was founded by Craig Newmark.
Follow up: When was Craig Newmark born?
Intermediate answer: Craig Newmark was born on December 6, 1952.
So the final answer is: December 6, 1952
Question: Who was the maternal grandfather of George Washington?
Are follow up questions needed here: Yes.
Follow up: Who was the mother of George Washington?
Intermediate answer: The mother of George Washington was Mary Ball Washington.
Follow up: Who was the father of Mary Ball Washington?
Intermediate answer: The father of Mary Ball Washington was Joseph Ball.
So the final answer is: Joseph Ball
Question: Are both the directors of Jaws and Casino Royale from the same country?
Are follow up questions needed here: Yes.
Follow up: Who is the director of Jaws?
Intermediate answer: The director of Jaws is Steven Spielberg.
Follow up: Where is Steven Spielberg from?
Intermediate answer: The United States.
Follow up: Who is the director of Casino Royale?
Intermediate answer: The director of Casino Royale is Martin Campbell.
Follow up: Where is Martin Campbell from?
Intermediate answer: New Zealand.
So the final answer is: No
Question: {input}
Are followup questions needed here:{agent_scratchpad}"""
PROMPT = PromptTemplate(
input_variables=["input", "agent_scratchpad"], template=_DEFAULT_TEMPLATE
)
|
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None or a string or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
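# --- Illustrative usage sketch, assuming the docarray<=0.21 DocumentArray API ---
# The mixin above is not used directly; it backs DocumentArray's sqlite storage.
# The connection string and table name below are illustrative values only.
if __name__ == "__main__":
    from docarray import Document, DocumentArray

    da = DocumentArray(
        storage='sqlite',
        config={'connection': 'example.db', 'table_name': 'my_docs'},
    )
    da.append(Document(text='hello'))
    print(len(da))  # the document is persisted in the sqlite table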
|
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None or a string or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
|
import torch
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very cold.", "She walked to the store."],
"passage3": ["Its rainy", "She took the bus"],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
emb_passages3 = teacher_model.encode(batch["passage3"])
return {
"label": torch.stack(
[
teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2),
teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages3),
],
dim=1,
)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SparseMarginMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SparseMarginMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
"num_proc",
],
defaults=[None, None, None, False, False, False, False, False, None],
)
def is_1percent_close(source, target):
return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
test_command = TestCommand(*args)
test_command.run()
dataset_readme_path = os.path.join(dataset_dir, "README.md")
assert os.path.exists(dataset_readme_path)
dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}
),
splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
],
download_size=3940680,
dataset_size=2589981,
)
}
)
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
assert is_1percent_close(result, expected)
elif key == "splits":
assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
result == expected
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
"num_proc",
"trust_remote_code",
],
defaults=[None, None, None, False, False, False, False, False, None, None],
)
def is_1percent_close(source, target):
return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
args = _TestCommandArgs(
dataset=dataset_loading_script_dir, all_configs=True, save_infos=True, trust_remote_code=True
)
test_command = TestCommand(*args)
test_command.run()
dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
assert os.path.exists(dataset_readme_path)
dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}
),
splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
],
download_size=3940680,
dataset_size=2589981,
)
}
)
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
assert is_1percent_close(result, expected)
elif key == "splits":
assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
result == expected
|
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
class TimmImageEncoder(Executor):
"""
TimmImageEncoder encodes Document blobs of type `ndarray` (`float32`) and shape
`H x W x 3` into a `d`-dimensional embedding. The input image in the Document should be
in RGB format.
If `use_default_preprocessing=False`, the expected input shape is
`3 x H x W` with `float32` dtype.
Internally, :class:`TimmImageEncoder` wraps the pre-trained models from
[Timm library](https://rwightman.github.io/pytorch-image-models/).
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
traversal_path: Tuple[str] = ('r',),
batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs
):
"""
:param model_name: the name of the model. Models listed on:
https://rwightman.github.io/pytorch-image-models/models/
:param device: Which device the model runs on. For example 'cpu' or 'cuda'.
:param traversal_path: Defines the traversal path through the docs. It can be
overridden via request params.
:param batch_size: Defines the batch size for inference on the loaded Timm model.
It can be overridden via request params.
:param use_default_preprocessing: Whether the input should be preprocessed with
the default configuration. If `False`, inputs are expected to be pre-processed.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
super().__init__(*args, **kwargs)
self.device = device
self.batch_size = batch_size
self.use_default_preprocessing = use_default_preprocessing
self.traversal_path = traversal_path
self._model = create_model(model_name, pretrained=True, num_classes=0)
self._model = self._model.to(device)
self._model.eval()
config = resolve_data_config({}, model=self._model)
self._preprocess = create_transform(**config)
self._preprocess.transforms.insert(0, T.ToPILImage())
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode image data from the `blob` attribute of Documents into an ndarray of
dimension `D`, and fill the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary with additional request parameters. Possible
values are `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional keyword arguments.
"""
if docs is None:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_path)
batch_size = parameters.get('batch_size', self.batch_size)
docs_batch_generator = docs.batch(
traversal_paths=traversal_paths,
batch_size=batch_size,
require_attr='blob',
)
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack([self._preprocess(img) for img in blob_batch])
else:
images = np.stack(blob_batch)
with torch.inference_mode():
tensor = torch.from_numpy(images).to(self.device)
features = self._model(tensor)
features = features.cpu().numpy()
for doc, embed in zip(document_batch, features):
doc.embedding = embed
|
from typing import Dict, Iterable, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
class TimmImageEncoder(Executor):
"""
:class:`TimmImageEncoder` encodes ``Document`` blobs of type `ndarray`
(`float32`) and shape `H x W x 3` into an `ndarray` of dimension `D`,
where `D` is the dimension of the embedding.
Input image in Document should be in RGB format.
If `use_default_preprocessing=False`, the expected input shape is
`3 x H x W` with `float32` dtype.
Internally, :class:`TimmImageEncoder` wraps the pre-trained models from
`Timm library`.
https://rwightman.github.io/pytorch-image-models/
:param model_name: the name of the model. Models listed on:
https://rwightman.github.io/pytorch-image-models/models/
:param device: Which device the model runs on. For example 'cpu' or 'cuda'.
:param default_traversal_path: Defines the traversal path through the docs.
Defaults to the tuple ('r',) and can be overwritten.
:param default_batch_size: Defines the batch size for inference on the loaded Timm model.
Default batch size is 32 and can be updated by passing an int value.
:param use_default_preprocessing: Whether the input should be preprocessed with the default configuration.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
default_traversal_path: Tuple[str] = ('r',),
default_batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.device = device
self.default_batch_size = default_batch_size
self.use_default_preprocessing = use_default_preprocessing
self.default_traversal_path = default_traversal_path
self._model = create_model(model_name, pretrained=True, num_classes=0)
self._model = self._model.to(device)
self._model.eval()
config = resolve_data_config({}, model=self._model)
self._preprocess = create_transform(**config)
self._preprocess.transforms.insert(0, T.ToPILImage())
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode image data into an ndarray of dimension `D`, and fill the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional keyword arguments.
"""
if docs:
docs_batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_path
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._compute_embeddings(docs_batch_generator)
def _compute_embeddings(self, docs_batch_generator: Iterable) -> None:
with torch.no_grad():
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack([self._preprocess(img) for img in blob_batch])
else:
images = np.stack(blob_batch)
tensor = torch.from_numpy(images).to(self.device)
features = self._model(tensor)
features = features.cpu().numpy()
for doc, embed in zip(document_batch, features):
doc.embedding = embed
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import DistSamplerSeedHook
class TestDistSamplerSeedHook:
def test_before_epoch(self):
hook = DistSamplerSeedHook()
# Test dataset sampler
runner = Mock()
runner.epoch = 1
runner.train_loop.dataloader = Mock()
runner.train_loop.dataloader.sampler = Mock()
runner.train_loop.dataloader.sampler.set_epoch = Mock()
hook.before_train_epoch(runner)
runner.train_loop.dataloader.sampler.set_epoch.assert_called()
# Test batch sampler
runner = Mock()
runner.train_loop.dataloader = Mock()
runner.train_loop.dataloader.sampler = Mock(spec_set=True)
runner.train_loop.dataloader.batch_sampler = Mock()
runner.train_loop.dataloader.batch_sampler.sampler = Mock()
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch = Mock()
hook.before_train_epoch(runner)
runner.train_loop.dataloader.\
batch_sampler.sampler.set_epoch.assert_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import DistSamplerSeedHook
class TestDistSamplerSeedHook:
def test_before_epoch(self):
hook = DistSamplerSeedHook()
# Test dataset sampler
runner = Mock()
runner.epoch = 1
runner.cur_dataloader = Mock()
runner.cur_dataloader.sampler = Mock()
runner.cur_dataloader.sampler.set_epoch = Mock()
hook.before_train_epoch(runner)
runner.cur_dataloader.sampler.set_epoch.assert_called()
# Test batch sampler
runner = Mock()
runner.cur_dataloader = Mock()
runner.cur_dataloader.sampler = Mock(spec_set=True)
runner.cur_dataloader.batch_sampler = Mock()
runner.cur_dataloader.batch_sampler.sampler = Mock()
runner.cur_dataloader.batch_sampler.sampler.set_epoch = Mock()
hook.before_train_epoch(runner)
runner.cur_dataloader.batch_sampler.sampler.set_epoch.assert_called()
|
"""Init file."""
from llama_index.readers.web.agentql_web.base import (
AgentQLWebReader,
)
from llama_index.readers.web.async_web.base import (
AsyncWebPageReader,
)
from llama_index.readers.web.beautiful_soup_web.base import (
BeautifulSoupWebReader,
)
from llama_index.readers.web.browserbase_web.base import BrowserbaseWebReader
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
from llama_index.readers.web.hyperbrowser_web.base import HyperbrowserWebReader
from llama_index.readers.web.knowledge_base.base import (
KnowledgeBaseWebReader,
)
from llama_index.readers.web.main_content_extractor.base import (
MainContentExtractorReader,
)
from llama_index.readers.web.news.base import NewsArticleReader
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
from llama_index.readers.web.readability_web.base import (
ReadabilityWebPageReader,
)
from llama_index.readers.web.rss.base import (
RssReader,
)
from llama_index.readers.web.rss_news.base import (
RssNewsReader,
)
from llama_index.readers.web.scrapfly_web.base import (
ScrapflyReader,
)
from llama_index.readers.web.simple_web.base import (
SimpleWebPageReader,
)
from llama_index.readers.web.sitemap.base import (
SitemapReader,
)
from llama_index.readers.web.spider_web.base import (
SpiderWebReader,
)
from llama_index.readers.web.trafilatura_web.base import (
TrafilaturaWebReader,
)
from llama_index.readers.web.unstructured_web.base import (
UnstructuredURLLoader,
)
from llama_index.readers.web.whole_site.base import (
WholeSiteReader,
)
from llama_index.readers.web.zyte_web.base import (
ZyteWebReader,
)
__all__ = [
"AgentQLWebReader",
"AsyncWebPageReader",
"BeautifulSoupWebReader",
"BrowserbaseWebReader",
"FireCrawlWebReader",
"HyperbrowserWebReader",
"KnowledgeBaseWebReader",
"MainContentExtractorReader",
"NewsArticleReader",
"OxylabsWebReader",
"ReadabilityWebPageReader",
"RssReader",
"RssNewsReader",
"ScrapflyReader",
"SimpleWebPageReader",
"SitemapReader",
"SpiderWebReader",
"TrafilaturaWebReader",
"UnstructuredURLLoader",
"WholeSiteReader",
"ZyteWebReader",
]
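# Usage sketch: every reader exported above follows the same
# `load_data(...) -> list[Document]` pattern. For example (requires network
# access; the URL is only a placeholder):
#
#     from llama_index.readers.web import SimpleWebPageReader
#
#     reader = SimpleWebPageReader(html_to_text=True)
#     documents = reader.load_data(urls=["https://example.com"])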
|
"""Init file."""
from llama_index.readers.web.agentql_web.base import (
AgentQLWebReader,
)
from llama_index.readers.web.async_web.base import (
AsyncWebPageReader,
)
from llama_index.readers.web.beautiful_soup_web.base import (
BeautifulSoupWebReader,
)
from llama_index.readers.web.browserbase_web.base import BrowserbaseWebReader
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
from llama_index.readers.web.hyperbrowser_web.base import HyperbrowserWebReader
from llama_index.readers.web.knowledge_base.base import (
KnowledgeBaseWebReader,
)
from llama_index.readers.web.main_content_extractor.base import (
MainContentExtractorReader,
)
from llama_index.readers.web.news.base import NewsArticleReader
from llama_index.readers.web.readability_web.base import (
ReadabilityWebPageReader,
)
from llama_index.readers.web.rss.base import (
RssReader,
)
from llama_index.readers.web.rss_news.base import (
RssNewsReader,
)
from llama_index.readers.web.scrapfly_web.base import (
ScrapflyReader,
)
from llama_index.readers.web.simple_web.base import (
SimpleWebPageReader,
)
from llama_index.readers.web.sitemap.base import (
SitemapReader,
)
from llama_index.readers.web.spider_web.base import (
SpiderWebReader,
)
from llama_index.readers.web.trafilatura_web.base import (
TrafilaturaWebReader,
)
from llama_index.readers.web.unstructured_web.base import (
UnstructuredURLLoader,
)
from llama_index.readers.web.whole_site.base import (
WholeSiteReader,
)
from llama_index.readers.web.zyte_web.base import (
ZyteWebReader,
)
__all__ = [
"AgentQLWebReader",
"AsyncWebPageReader",
"BeautifulSoupWebReader",
"BrowserbaseWebReader",
"FireCrawlWebReader",
"HyperbrowserWebReader",
"KnowledgeBaseWebReader",
"MainContentExtractorReader",
"NewsArticleReader",
"ReadabilityWebPageReader",
"RssReader",
"RssNewsReader",
"ScrapflyReader",
"SimpleWebPageReader",
"SitemapReader",
"SpiderWebReader",
"TrafilaturaWebReader",
"UnstructuredURLLoader",
"WholeSiteReader",
"ZyteWebReader",
]
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
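# This file is typically consumed as a `_base_` config and selectively
# overridden in a model config. A minimal sketch (file name and values are
# placeholders; mmengine merges dict fields recursively):
#
#     _base_ = './coco_detection.py'
#     train_dataloader = dict(batch_size=4, num_workers=4)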
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
|
import numpy as np
from docarray import Document
from docarray.typing import Tensor
def test_set_tensor():
class MyDocument(Document):
tensor: Tensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, Tensor)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray.typing import Tensor
from docarray import Document
def test_set_tensor():
class MyDocument(Document):
tensor: Tensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, Tensor)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
|
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
__all__ = ['AudioNdArray']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__.extend(['AudioTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import ( # noqa
AudioTensorFlowTensor,
)
__all__.extend(['AudioTensorFlowTensor'])
|
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
__all__ = ['AudioNdArray']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__.extend(['AudioTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import ( # noqa
AudioTensorFlowTensor,
)
__all__.extend(['AudioTensorFlowTensor'])
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DLPack functions."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.dlpack import dlpack
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
int_dtypes = [
np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64
]
float_dtypes = [np.float16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
dlpack_dtypes = (
int_dtypes + float_dtypes + [dtypes.bfloat16] + complex_dtypes + [np.bool_]
)
testcase_shapes = [(), (1,), (2, 3), (2, 0), (0, 7), (4, 1, 2)]
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(str(dtype), ",".join(map(str, shape)))
def GetNamedTestParameters():
result = []
for dtype in dlpack_dtypes:
for shape in testcase_shapes:
result.append({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
})
return result
class DLPackTest(parameterized.TestCase, test.TestCase):
@parameterized.named_parameters(GetNamedTestParameters())
def testRoundTrip(self, dtype, shape):
np.random.seed(42)
if dtype == np.bool_:
np_array = np.random.randint(0, 1, shape, np.bool_)
else:
np_array = np.random.randint(0, 10, shape)
# copy to gpu if available
tf_tensor = array_ops.identity(constant_op.constant(np_array, dtype=dtype))
tf_tensor_device = tf_tensor.device
tf_tensor_dtype = tf_tensor.dtype
dlcapsule = dlpack.to_dlpack(tf_tensor)
del tf_tensor # should still work
tf_tensor2 = dlpack.from_dlpack(dlcapsule)
self.assertAllClose(np_array, tf_tensor2)
if tf_tensor_dtype == dtypes.int32:
# int32 tensor is always on cpu for now
self.assertEqual(tf_tensor2.device,
"/job:localhost/replica:0/task:0/device:CPU:0")
else:
self.assertEqual(tf_tensor_device, tf_tensor2.device)
def testRoundTripWithoutToDlpack(self):
np_array = np.random.randint(0, 10, [42])
self.assertAllEqual(
np.from_dlpack(constant_op.constant(np_array).cpu()), np_array
)
def testTensorsCanBeConsumedOnceOnly(self):
np.random.seed(42)
np_array = np.random.randint(0, 10, (2, 3, 4))
tf_tensor = constant_op.constant(np_array, dtype=np.float32)
dlcapsule = dlpack.to_dlpack(tf_tensor)
del tf_tensor # should still work
_ = dlpack.from_dlpack(dlcapsule)
def ConsumeDLPackTensor():
dlpack.from_dlpack(dlcapsule) # a DLPack capsule can be consumed at most once
self.assertRaisesRegex(Exception,
".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
def testDLPackFromWithoutContextInitialization(self):
tf_tensor = constant_op.constant(1)
dlcapsule = dlpack.to_dlpack(tf_tensor)
# Resetting the context doesn't cause an error.
context._reset_context()
_ = dlpack.from_dlpack(dlcapsule)
def testUnsupportedTypeToDLPack(self):
def UnsupportedQint16():
tf_tensor = constant_op.constant([[1, 4], [5, 2]], dtype=dtypes.qint16)
_ = dlpack.to_dlpack(tf_tensor)
self.assertRaisesRegex(Exception, ".* is not supported by dlpack",
UnsupportedQint16)
def testMustPassTensorArgumentToDLPack(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"The argument to `to_dlpack` must be a TF tensor, not Python object"):
dlpack.to_dlpack([1])
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
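# The tests above target the internal `tensorflow.python.dlpack` module; the
# same round trip is available through the public API. Illustrative sketch:
#
#     import tensorflow as tf
#
#     t = tf.constant([1.0, 2.0, 3.0])
#     capsule = tf.experimental.dlpack.to_dlpack(t)
#     t2 = tf.experimental.dlpack.from_dlpack(capsule)  # the capsule is consumed here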
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DLPack functions."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.dlpack import dlpack
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.ops import array_ops
int_dtypes = [
np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64
]
float_dtypes = [np.float16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
dlpack_dtypes = (
int_dtypes + float_dtypes + [dtypes.bfloat16] + complex_dtypes + [np.bool_]
)
testcase_shapes = [(), (1,), (2, 3), (2, 0), (0, 7), (4, 1, 2)]
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(str(dtype), ",".join(map(str, shape)))
def GetNamedTestParameters():
result = []
for dtype in dlpack_dtypes:
for shape in testcase_shapes:
result.append({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
})
return result
class DLPackTest(parameterized.TestCase, test.TestCase):
@parameterized.named_parameters(GetNamedTestParameters())
def testRoundTrip(self, dtype, shape):
np.random.seed(42)
if dtype == np.bool_:
np_array = np.random.randint(0, 1, shape, np.bool_)
else:
np_array = np.random.randint(0, 10, shape)
# copy to gpu if available
tf_tensor = array_ops.identity(constant_op.constant(np_array, dtype=dtype))
tf_tensor_device = tf_tensor.device
tf_tensor_dtype = tf_tensor.dtype
dlcapsule = dlpack.to_dlpack(tf_tensor)
del tf_tensor # should still work
tf_tensor2 = dlpack.from_dlpack(dlcapsule)
self.assertAllClose(np_array, tf_tensor2)
if tf_tensor_dtype == dtypes.int32:
# int32 tensor is always on cpu for now
self.assertEqual(tf_tensor2.device,
"/job:localhost/replica:0/task:0/device:CPU:0")
else:
self.assertEqual(tf_tensor_device, tf_tensor2.device)
def testTensorsCanBeConsumedOnceOnly(self):
np.random.seed(42)
np_array = np.random.randint(0, 10, (2, 3, 4))
tf_tensor = constant_op.constant(np_array, dtype=np.float32)
dlcapsule = dlpack.to_dlpack(tf_tensor)
del tf_tensor # should still work
_ = dlpack.from_dlpack(dlcapsule)
def ConsumeDLPackTensor():
dlpack.from_dlpack(dlcapsule) # a DLPack capsule can be consumed at most once
self.assertRaisesRegex(Exception,
".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
def testDLPackFromWithoutContextInitialization(self):
tf_tensor = constant_op.constant(1)
dlcapsule = dlpack.to_dlpack(tf_tensor)
# Resetting the context doesn't cause an error.
context._reset_context()
_ = dlpack.from_dlpack(dlcapsule)
def testUnsupportedTypeToDLPack(self):
def UnsupportedQint16():
tf_tensor = constant_op.constant([[1, 4], [5, 2]], dtype=dtypes.qint16)
_ = dlpack.to_dlpack(tf_tensor)
self.assertRaisesRegex(Exception, ".* is not supported by dlpack",
UnsupportedQint16)
def testMustPassTensorArgumentToDLPack(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"The argument to `to_dlpack` must be a TF tensor, not Python object"):
dlpack.to_dlpack([1])
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FSAFHead
class TestFSAFHead(TestCase):
def test_fsaf_head_loss(self):
"""Tests fsaf head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s),
'pad_shape': (s, s),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False))
fsaf_head = FSAFHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'),
train_cfg=cfg)
# FSAF head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in fsaf_head.prior_generator.strides)
cls_scores, bbox_preds = fsaf_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet.models.dense_heads import FSAFHead
class TestFSAFHead(TestCase):
def test_fsaf_head_loss(self):
"""Tests fsaf head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s),
'pad_shape': (s, s),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False))
fsaf_head = FSAFHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'),
train_cfg=cfg)
# FSAF head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in fsaf_head.prior_generator.strides)
cls_scores, bbox_preds = fsaf_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'PackDetInputs', 'Compose', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromWebcam', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform',
'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform',
'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine',
'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations', 'Pad'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'PackDetInputs', 'Compose', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromWebcam', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform',
'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform',
'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine',
'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations'
]
|
from docutils import nodes
from docutils.parsers.rst import Directive
class BetaStatus(Directive):
has_content = True
text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed."
def run(self):
text = self.text.format(api_name=" ".join(self.content))
return [nodes.warning("", nodes.paragraph("", "", nodes.Text(text)))]
class V2BetaStatus(BetaStatus):
text = (
"The {api_name} is in Beta stage, and while we do not expect major breaking changes, "
"some APIs may still change according to user feedback. Please submit any feedback you may have "
"in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check "
"out https://github.com/pytorch/vision/issues/7319 to learn "
"more about the APIs that we suspect might involve future changes."
)
def setup(app):
app.add_directive("betastatus", BetaStatus)
app.add_directive("v2betastatus", V2BetaStatus)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
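# Usage sketch: in an .rst source file the directive registered above is
# invoked as (the API name is just an example):
#
#     .. betastatus:: Video API
#
# which renders a warning box reading "The Video API is in Beta stage, and
# backward compatibility is not guaranteed."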
|
from docutils import nodes
from docutils.parsers.rst import Directive
class BetaStatus(Directive):
has_content = True
def run(self):
api_name = " ".join(self.content)
text = f"The {api_name} is in Beta stage, and backward compatibility is not guaranteed."
return [nodes.warning("", nodes.paragraph("", "", nodes.Text(text)))]
def setup(app):
app.add_directive("betastatus", BetaStatus)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
import asyncio
import copy
from typing import Any, List, TYPE_CHECKING
from jina.serve.runtimes.servers import BaseServer
if TYPE_CHECKING:
from jina.logging.logger import JinaLogger
class CompositeBaseServer(BaseServer):
"""Composite Base Server implementation from which u can inherit a specific custom composite one"""
servers: List['BaseServer']
logger: 'JinaLogger'
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._kwargs = kwargs
@property
def _server_kwargs(self):
ret = []
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
for port, protocol in zip(self.ports, self.protocols):
# ignore monitoring and tracing args since they are not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = port
runtime_args.protocol = protocol
server_kwargs = {k: v for k, v in self._kwargs.items() if k != 'runtime_args'}
server_kwargs['runtime_args'] = dict(vars(runtime_args))
server_kwargs['req_handler'] = self._request_handler
ret.append(server_kwargs)
return ret
async def setup_server(self):
"""
setup servers inside CompositeServer
"""
self.logger.debug(f'Setting up Composite server')
tasks = []
for server in self.servers:
tasks.append(asyncio.create_task(server.setup_server()))
await asyncio.gather(*tasks)
self.logger.debug(f'Composite server setup successful')
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.logger.debug(f'Shutting down server')
await super().shutdown()
shutdown_tasks = []
for server in self.servers:
shutdown_tasks.append(asyncio.create_task(server.shutdown()))
await asyncio.gather(*shutdown_tasks)
self.logger.debug(f'Server shutdown finished')
async def run_server(self):
"""Run servers inside CompositeServer forever"""
run_server_tasks = []
for server in self.servers:
run_server_tasks.append(asyncio.create_task(server.run_server()))
await asyncio.gather(*run_server_tasks)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(server, 'should_exit', True) for server in self.servers
]
return all(should_exit_values)
class CompositeServer(CompositeBaseServer):
"""Composite Server implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.servers: List[BaseServer] = []
for server_kwargs in self._server_kwargs:
server_cls = _get_gateway_class(server_kwargs['runtime_args']['protocol'], works_as_load_balancer=self.works_as_load_balancer)
server = server_cls(**server_kwargs)
self.servers.append(server)
self.gateways = self.servers # for backwards compatibility
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
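# The memo trick above pre-maps the id() of each ignored attribute to None, so
# copy.deepcopy substitutes None for it instead of recursing into it. A small
# self-contained illustration (names are illustrative only):
def _demo_deepcopy_with_ignored_attr() -> None:
    class _Args:
        def __init__(self):
            self.port = 8080
            self.tracer_provider = object()  # stand-in for a non-copyable attribute

    args = _Args()
    copied = copy.deepcopy(args, memo={id(args.tracer_provider): None})
    assert copied.port == 8080 and copied.tracer_provider is None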
|
import asyncio
import copy
from typing import Any, List
from jina.serve.runtimes.servers import BaseServer
class CompositeServer(BaseServer):
"""Composite Server implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.servers: List[BaseServer] = []
for port, protocol in zip(self.ports, self.protocols):
server_cls = _get_gateway_class(protocol, works_as_load_balancer=self.works_as_load_balancer)
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
server_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
server_kwargs['runtime_args'] = dict(vars(runtime_args))
server_kwargs['req_handler'] = self._request_handler
server = server_cls(**server_kwargs)
self.servers.append(server)
self.gateways = self.servers # for backwards compatibility
async def setup_server(self):
"""
setup servers inside CompositeServer
"""
self.logger.debug(f'Setting up Composite server')
tasks = []
for server in self.servers:
tasks.append(asyncio.create_task(server.setup_server()))
await asyncio.gather(*tasks)
self.logger.debug(f'Composite server setup successful')
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.logger.debug(f'Shutting down server')
await super().shutdown()
shutdown_tasks = []
for server in self.servers:
shutdown_tasks.append(asyncio.create_task(server.shutdown()))
await asyncio.gather(*shutdown_tasks)
self.logger.debug(f'Server shutdown finished')
async def run_server(self):
"""Run servers inside CompositeServer forever"""
run_server_tasks = []
for server in self.servers:
run_server_tasks.append(asyncio.create_task(server.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(server, 'should_exit', True) for server in self.servers
]
return all(should_exit_values)
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Mask(TVTensor):
""":class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
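# Usage sketch: a Mask behaves like a plain tensor but is recognised by the
# torchvision v2 transforms dispatch. Illustrative only:
def _demo_mask_construction() -> Mask:
    """Wrap a plain uint8 tensor as a Mask (shape and values are arbitrary)."""
    return Mask(torch.zeros(1, 224, 224, dtype=torch.uint8))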
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Mask(TVTensor):
"""[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_distributed_shuffle_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 2
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle()
ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_run(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.run
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
(2, 2), # each node has 2 shards and each worker has 1 shards
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_run_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.run
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
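# Typical training-side usage of the API exercised above (sketch; in practice
# `rank` and `world_size` come from the distributed launcher environment):
def _demo_split_for_rank(rank: int, world_size: int) -> Dataset:
    """Illustrative only: give each rank its own slice of a toy dataset."""
    ds = Dataset.from_dict({"i": range(100)})
    return split_dataset_by_node(ds, rank=rank, world_size=world_size)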
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_distributed_shuffle_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 2
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle()
ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "launch_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
(2, 2), # each node has 2 shards and each worker has 1 shards
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "launch_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
|
"""Tool for asking human input."""
from typing import Callable, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
def _print_func(text: str) -> None:
print("\n") # noqa: T201
print(text) # noqa: T201
class HumanInputRun(BaseTool):
"""Tool that asks user for input."""
name: str = "human"
description: str = (
"You can ask a human for guidance when you think you "
"got stuck or you are not sure what to do next. "
"The input should be a question for the human."
)
prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func)
input_func: Callable = Field(default_factory=lambda: input)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Human input tool."""
self.prompt_func(query)
return self.input_func()
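# Usage sketch (illustrative; this blocks on stdin when actually executed):
#
#     tool = HumanInputRun()
#     answer = tool.invoke("Which branch should I deploy?")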
|
"""Tool for asking human input."""
from typing import Callable, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
def _print_func(text: str) -> None:
print("\n") # noqa: T201
print(text) # noqa: T201
class HumanInputRun(BaseTool): # type: ignore[override]
"""Tool that asks user for input."""
name: str = "human"
description: str = (
"You can ask a human for guidance when you think you "
"got stuck or you are not sure what to do next. "
"The input should be a question for the human."
)
prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func)
input_func: Callable = Field(default_factory=lambda: input)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Human input tool."""
self.prompt_func(query)
return self.input_func()
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.10',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.10',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
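# Install-time usage of the extras declared above (sketch):
#
#     pip install "docarray[common]"   # lightweight extras
#     pip install "docarray[full]"     # full extras, incl. graphql / av / trimesh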
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {} # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.77%
Accuracy@3: 64.40%
Accuracy@5: 66.87%
Accuracy@10: 71.83%
Precision@1: 50.77%
Precision@3: 40.45%
Precision@5: 34.06%
Precision@10: 25.98%
Recall@1: 6.27%
Recall@3: 11.69%
Recall@5: 13.74%
Recall@10: 17.23%
MRR@10: 0.5814
NDCG@10: 0.3621
MAP@100: 0.1838
Model Query Sparsity: Active Dimensions: 40.0, Sparsity Ratio: 0.9987
Model Corpus Sparsity: Active Dimensions: 206.2, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3621
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {} # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Query Sparsity: Active Dimensions: 43.1, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 207.0, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"cuda"',
'download_model:True',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"cuda"',
],
timeout=30,
check=True,
)
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> CLIPModel:
return CLIPModel(model_name=input_path)
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> CLIPModel:
return CLIPModel(model_name=input_path)
|
from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad.xml import format_xml
def test_single_agent_action_observation() -> None:
# Arrange
agent_action = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
observation = "Observation1"
intermediate_steps = [(agent_action, observation)]
# Act
result = format_xml(intermediate_steps)
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation>"""
# Assert
assert result == expected_result
def test_multiple_agent_actions_observations() -> None:
# Arrange
agent_action1 = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
agent_action2 = AgentAction(tool="Tool2", tool_input="Input2", log="Log2")
observation1 = "Observation1"
observation2 = "Observation2"
intermediate_steps = [(agent_action1, observation1), (agent_action2, observation2)]
# Act
result = format_xml(intermediate_steps)
# Assert
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation><tool>\
Tool2</tool><tool_input>Input2</tool_input><observation>\
Observation2</observation>"""
assert result == expected_result
def test_empty_list_agent_actions() -> None:
result = format_xml([])
assert result == ""
def test_xml_escaping_minimal() -> None:
"""Test that XML tags in tool names are escaped with minimal format."""
# Arrange
agent_action = AgentAction(
tool="search<tool>nested</tool>", tool_input="query<input>test</input>", log=""
)
observation = "Found <observation>result</observation>"
intermediate_steps = [(agent_action, observation)]
# Act
result = format_xml(intermediate_steps, escape_format="minimal")
# Assert - XML tags should be replaced with custom delimiters
expected_result = (
"<tool>search[[tool]]nested[[/tool]]</tool>"
"<tool_input>query<input>test</input></tool_input>"
"<observation>Found [[observation]]result[[/observation]]</observation>"
)
assert result == expected_result
def test_no_escaping() -> None:
"""Test that escaping can be disabled."""
# Arrange
agent_action = AgentAction(tool="Tool1", tool_input="Input1", log="")
observation = "Observation1"
intermediate_steps = [(agent_action, observation)]
# Act
result = format_xml(intermediate_steps, escape_format=None)
# Assert
expected_result = (
"<tool>Tool1</tool><tool_input>Input1</tool_input>"
"<observation>Observation1</observation>"
)
assert result == expected_result
|
from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad.xml import format_xml
def test_single_agent_action_observation() -> None:
# Arrange
agent_action = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
observation = "Observation1"
intermediate_steps = [(agent_action, observation)]
# Act
result = format_xml(intermediate_steps)
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation>"""
# Assert
assert result == expected_result
def test_multiple_agent_actions_observations() -> None:
# Arrange
agent_action1 = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
agent_action2 = AgentAction(tool="Tool2", tool_input="Input2", log="Log2")
observation1 = "Observation1"
observation2 = "Observation2"
intermediate_steps = [(agent_action1, observation1), (agent_action2, observation2)]
# Act
result = format_xml(intermediate_steps)
# Assert
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation><tool>\
Tool2</tool><tool_input>Input2</tool_input><observation>\
Observation2</observation>"""
assert result == expected_result
def test_empty_list_agent_actions() -> None:
result = format_xml([])
assert result == ""
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"audio": Audio()})
label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
audio_column: str = "audio"
transcription_column: str = "transcription"
def align_with_features(self, features):
if self.audio_column not in features:
raise ValueError(f"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column], Audio):
raise ValueError(f"Column {self.audio_column} is not an Audio type.")
task_template = copy.deepcopy(self)
input_schema = self.input_schema.copy()
input_schema["audio"] = features[self.audio_column]
task_template.__dict__["input_schema"] = input_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
|
import copy
from dataclasses import dataclass
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
task: str = "automatic-speech-recognition"
input_schema: ClassVar[Features] = Features({"audio": Audio()})
label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
audio_column: str = "audio"
transcription_column: str = "transcription"
def align_with_features(self, features):
if self.audio_column not in features:
raise ValueError(f"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column], Audio):
raise ValueError(f"Column {self.audio_column} is not an Audio type.")
task_template = copy.deepcopy(self)
input_schema = self.input_schema.copy()
input_schema["audio"] = features[self.audio_column]
task_template.__dict__["input_schema"] = input_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'TODO:imagenet_pretrain' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
help='If not None, used for naming the `{model_name}.mar`'
'file that will be created under `output_folder`.'
'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mmcv.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
help='If not None, used for naming the `{model_name}.mar`'
'file that will be created under `output_folder`.'
'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
|
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.type import MediaFileType
class NvidiaDeepfakeDetectBlock(Block):
class Input(BlockSchema):
credentials: NvidiaCredentialsInput = NvidiaCredentialsField()
image_base64: MediaFileType = SchemaField(
description="Image to analyze for deepfakes",
)
return_image: bool = SchemaField(
description="Whether to return the processed image with markings",
default=False,
)
class Output(BlockSchema):
status: str = SchemaField(
description="Detection status (SUCCESS, ERROR, CONTENT_FILTERED)",
)
image: MediaFileType = SchemaField(
description="Processed image with detection markings (if return_image=True)",
)
is_deepfake: float = SchemaField(
description="Probability that the image is a deepfake (0-1)",
)
def __init__(self):
super().__init__(
id="8c7d0d67-e79c-44f6-92a1-c2600c8aac7f",
description="Detects potential deepfakes in images using Nvidia's AI API",
categories={BlockCategory.SAFETY},
input_schema=NvidiaDeepfakeDetectBlock.Input,
output_schema=NvidiaDeepfakeDetectBlock.Output,
)
def run(
self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
) -> BlockOutput:
url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
image_data = f"data:image/jpeg;base64,{input_data.image_base64}"
payload = {
"input": [image_data],
"return_image": input_data.return_image,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
result = data.get("data", [{}])[0]
# Get deepfake probability from first bounding box if any
deepfake_prob = 0.0
if result.get("bounding_boxes"):
deepfake_prob = result["bounding_boxes"][0].get("is_deepfake", 0.0)
yield "status", result.get("status", "ERROR")
yield "is_deepfake", deepfake_prob
if input_data.return_image:
image_data = result.get("image", "")
output_data = f"data:image/jpeg;base64,{image_data}"
yield "image", output_data
else:
yield "image", ""
except Exception as e:
yield "error", str(e)
yield "status", "ERROR"
yield "is_deepfake", 0.0
yield "image", ""
|
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class NvidiaDeepfakeDetectBlock(Block):
class Input(BlockSchema):
credentials: NvidiaCredentialsInput = NvidiaCredentialsField()
image_base64: str = SchemaField(
description="Image to analyze for deepfakes", image_upload=True
)
return_image: bool = SchemaField(
description="Whether to return the processed image with markings",
default=False,
)
class Output(BlockSchema):
status: str = SchemaField(
description="Detection status (SUCCESS, ERROR, CONTENT_FILTERED)",
default="",
)
image: str = SchemaField(
description="Processed image with detection markings (if return_image=True)",
default="",
image_output=True,
)
is_deepfake: float = SchemaField(
description="Probability that the image is a deepfake (0-1)",
default=0.0,
)
def __init__(self):
super().__init__(
id="8c7d0d67-e79c-44f6-92a1-c2600c8aac7f",
description="Detects potential deepfakes in images using Nvidia's AI API",
categories={BlockCategory.SAFETY},
input_schema=NvidiaDeepfakeDetectBlock.Input,
output_schema=NvidiaDeepfakeDetectBlock.Output,
)
def run(
self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
) -> BlockOutput:
url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
image_data = f"data:image/jpeg;base64,{input_data.image_base64}"
payload = {
"input": [image_data],
"return_image": input_data.return_image,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
result = data.get("data", [{}])[0]
# Get deepfake probability from first bounding box if any
deepfake_prob = 0.0
if result.get("bounding_boxes"):
deepfake_prob = result["bounding_boxes"][0].get("is_deepfake", 0.0)
yield "status", result.get("status", "ERROR")
yield "is_deepfake", deepfake_prob
if input_data.return_image:
image_data = result.get("image", "")
output_data = f"data:image/jpeg;base64,{image_data}"
yield "image", output_data
else:
yield "image", ""
except Exception as e:
yield "error", str(e)
yield "status", "ERROR"
yield "is_deepfake", 0.0
yield "image", ""
|
_base_ = 'tridentnet_r50-caffe_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = 'tridentnet_r50-caffe_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
"""copy from
https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py."""
import torch
from mmengine.data import InstanceData
from mmdet.core.bbox.assigners import AssignResult
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
from .mask_sampling_result import MaskSamplingResult
@TASK_UTILS.register_module()
class MaskPseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Mask assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``scores`` and ``masks`` predicted
by the model.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``labels`` and ``masks``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
pred_masks = pred_instances.masks
gt_masks = gt_instances.masks
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = pred_masks.new_zeros(pred_masks.shape[0], dtype=torch.uint8)
sampling_result = MaskSamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
masks=pred_masks,
gt_masks=gt_masks,
assign_result=assign_result,
gt_flags=gt_flags,
avg_factor_with_neg=False)
return sampling_result
|
# Copyright (c) OpenMMLab. All rights reserved.
"""copy from
https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py."""
import torch
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
from .mask_sampling_result import MaskSamplingResult
@TASK_UTILS.register_module()
class MaskPseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result, masks, gt_masks, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Assigned results
masks (torch.Tensor): Bounding boxes
gt_masks (torch.Tensor): Ground truth boxes
Returns:
:obj:`SamplingResult`: sampler results
"""
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8)
sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks,
gt_masks, assign_result, gt_flags)
return sampling_result
|
from typing import TYPE_CHECKING, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
if TYPE_CHECKING:
from pydantic.networks import Parts
T = TypeVar('T', bound='AnyUrl')
class AnyUrl(BaseAnyUrl, BaseNode):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that need to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(any_url=str(self))
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file path.
"""
scheme = parts['scheme']
if scheme is None:
pass # allow missing scheme, unlike pydantic
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
|
from typing import Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import parse_obj_as
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that need to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(any_url=str(self))
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
|