input | output |
---|---|
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. We skip this method as it leads to
# RecursionError: maximum recursion depth exceeded while calling a Python object
# Until `disable` is removed, there will be graph breaks after all calls to functional transforms
@torch.compiler.disable
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
|
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
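# --- Usage sketch (annotation, not part of the original module) ---
# Assuming the public torchvision.tv_tensors entry points, `wrap` is typically
# used to re-attach TVTensor metadata after an operation has returned a plain
# tensor, for example:
#
#     import torch
#     from torchvision import tv_tensors
#
#     boxes = tv_tensors.BoundingBoxes(
#         torch.tensor([[0, 0, 10, 10]]), format="XYXY", canvas_size=(32, 32))
#     shifted = boxes + 2                              # plain torch.Tensor by default
#     shifted = tv_tensors.wrap(shifted, like=boxes)   # BoundingBoxes again, same
#                                                      # format and canvas_size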
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
# TODO: waiting on a fix for the error raised when this is called multiple times
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.data_elements import DetDataSample
from mmdet.utils import register_all_modules
# TODO: waiting on a fix for the error raised when this is called multiple times
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
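# --- Usage sketch (annotation, not part of the original test file) ---
# Outside the test suite, the returned DetDataSample is usually inspected via
# its `pred_instances` field (assuming the mmdet 3.x data structures):
#
#     result = inference_detector(model, img1)
#     instances = result.pred_instances
#     keep = instances.scores > 0.3
#     print(instances.bboxes[keep], instances.labels[keep])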
|
__version__ = '0.13.1'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.1'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
if 'NO_VERSION_CHECK' not in os.environ:
from .helper import is_latest_version
is_latest_version()
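# --- Usage sketch (annotation, not part of the original __init__) ---
# Both import-time hooks are opt-out via environment variables set before the
# package is imported, for example:
#
#     import os
#     os.environ['DA_NO_RICH_HANDLER'] = '1'  # skip installing the rich traceback handler
#     os.environ['NO_VERSION_CHECK'] = '1'    # skip the latest-version check
#     import docarray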
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
# Note: this transform assumes that the inputs to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
# Note: this transform assumes that the inputs to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
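# --- Usage sketch (annotation, not part of the original presets module) ---
# A preset is a plain callable over (image, target) pairs; `pil_image`, `boxes`
# and `labels` below are placeholders for real data:
#
#     preset = DetectionPresetTrain(data_augmentation="hflip", backend="pil")
#     img, target = preset(pil_image, {"boxes": boxes, "labels": labels})
#
#     eval_preset = DetectionPresetEval(backend="pil")
#     img, target = eval_preset(img, target)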
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
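# --- Shape arithmetic sketch (annotation, not part of the original layer) ---
# Plugging the docstring formulas into the first docstring example
# (3x3 input, pool_size=(2, 2), strides=(1, 1)):
#
#     "valid": floor((3 - 2) / 1) + 1 = 2  -> output shape (1, 2, 2, 1)
#     "same":  floor((3 - 1) / 1) + 1 = 3  -> output shape (1, 3, 3, 1)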
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
_flatten_dict,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
load_json,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"import_spacy": "langchain_community.callbacks.utils",
"import_pandas": "langchain_community.callbacks.utils",
"import_textstat": "langchain_community.callbacks.utils",
"_flatten_dict": "langchain_community.callbacks.utils",
"flatten_dict": "langchain_community.callbacks.utils",
"hash_string": "langchain_community.callbacks.utils",
"load_json": "langchain_community.callbacks.utils",
"BaseMetadataCallbackHandler": "langchain_community.callbacks.utils",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseMetadataCallbackHandler",
"_flatten_dict",
"flatten_dict",
"hash_string",
"import_pandas",
"import_spacy",
"import_textstat",
"load_json",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
_flatten_dict,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
load_json,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"import_spacy": "langchain_community.callbacks.utils",
"import_pandas": "langchain_community.callbacks.utils",
"import_textstat": "langchain_community.callbacks.utils",
"_flatten_dict": "langchain_community.callbacks.utils",
"flatten_dict": "langchain_community.callbacks.utils",
"hash_string": "langchain_community.callbacks.utils",
"load_json": "langchain_community.callbacks.utils",
"BaseMetadataCallbackHandler": "langchain_community.callbacks.utils",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"import_spacy",
"import_pandas",
"import_textstat",
"_flatten_dict",
"flatten_dict",
"hash_string",
"load_json",
"BaseMetadataCallbackHandler",
]
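# --- Usage sketch (annotation, not part of the original shim) ---
# Assuming this shim is importable as `langchain.callbacks.utils` (its exact
# path is not shown here), a deprecated import resolves lazily through
# `__getattr__`, which emits a deprecation warning and forwards to
# langchain_community:
#
#     from langchain.callbacks.utils import flatten_dict
#     flatten_dict({"a": {"b": 1}})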
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}),
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}),
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05))
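# --- Usage sketch (annotation, not part of the original config) ---
# MMDetection configs such as this one are consumed by the standard entry
# points; `<this_config>.py` below is a placeholder for wherever the file is
# saved under configs/:
#
#     python tools/train.py configs/swin/<this_config>.py
#     python tools/test.py configs/swin/<this_config>.py <checkpoint>.pth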
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Query Sparsity: Active Dimensions: 43.1, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 207.0, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Sparsity Stats Query : Row Non-Zero Mean: 43.08049392700195, Row Sparsity Mean: 0.9985886216163635
Model Sparsity Stats Corpus : Row Non-Zero Mean: 206.8623504638672, Row Sparsity Mean: 0.9932224750518799
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
_base_ = './solov2-light_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
mask_head=dict(
feat_channels=256,
stacked_convs=3,
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
mask_feature_head=dict(out_channels=128),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=False)) # light solov2 head
|
_base_ = 'solov2_light_r50_fpn_mstrain_3x_coco.py'
# model settings
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
mask_head=dict(
feat_channels=256,
stacked_convs=3,
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
mask_feature_head=dict(out_channels=128),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=False)) # light solov2 head
|
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from unittest import TestCase
import mmengine
from mmengine.utils.dl_utils import collect_env
class TestCollectEnv(TestCase):
def test_collect_env(self):
env_info = collect_env()
expected_keys = [
'sys.platform', 'Python', 'CUDA available', 'PyTorch',
'PyTorch compiling details', 'OpenCV', 'MMEngine', 'GCC'
]
for key in expected_keys:
assert key in env_info
if env_info['CUDA available']:
for key in ['CUDA_HOME', 'NVCC']:
assert key in env_info
assert env_info['sys.platform'] == sys.platform
assert env_info['Python'] == sys.version.replace('\n', '')
assert env_info['MMEngine'] == mmengine.__version__
|
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from unittest import TestCase
import torch.cuda
import mmengine
from mmengine.utils.dl_utils import collect_env
from mmengine.utils.dl_utils.parrots_wrapper import _get_cuda_home
class TestCollectEnv(TestCase):
def test_get_cuda_home(self):
CUDA_HOME = _get_cuda_home()
if torch.version.cuda is not None:
self.assertIsNotNone(CUDA_HOME)
else:
self.assertIsNone(CUDA_HOME)
def test_collect_env(self):
env_info = collect_env()
expected_keys = [
'sys.platform', 'Python', 'CUDA available', 'PyTorch',
'PyTorch compiling details', 'OpenCV', 'MMEngine', 'GCC'
]
for key in expected_keys:
assert key in env_info
if env_info['CUDA available']:
for key in ['CUDA_HOME', 'NVCC']:
assert key in env_info
if sys.platform == 'win32':
assert 'MSVC' in env_info
assert env_info['sys.platform'] == sys.platform
assert env_info['Python'] == sys.version.replace('\n', '')
assert env_info['MMEngine'] == mmengine.__version__
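# --- Usage sketch (annotation, not part of the original test file) ---
# `collect_env` returns a mapping of environment facts; a typical standalone
# use is simply:
#
#     from mmengine.utils.dl_utils import collect_env
#     for name, value in collect_env().items():
#         print(f'{name}: {value}')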
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""
Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
# depends on the previous step
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""
Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""
Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
Contexts is a list of context strings. Each context is formatted as
described in generate_context_for_replanner.
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
# depends on the previous step
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
Contexts is a list of context strings. Each context is formatted as
described in generate_context_for_replanner.
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
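# --- Usage sketch (annotation, not part of the original utils module) ---
# `parse_llm_compiler_action_args` and `default_dependency_rule` operate on the
# raw argument strings emitted by the planner, for example:
#
#     parse_llm_compiler_action_args('"Ronaldo number of kids"')
#     # -> ('Ronaldo number of kids',)
#     parse_llm_compiler_action_args('"I can answer the question now.", [3]')
#     # -> ('I can answer the question now.', [3])
#     default_dependency_rule(3, '"I can answer the question now.", [$3]')
#     # -> True, because the args reference step $3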
|
"""Base class for Office 365 tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.office365.utils import authenticate
if TYPE_CHECKING:
from O365 import Account
class O365BaseTool(BaseTool):
"""Base class for the Office 365 tools."""
account: Account = Field(default_factory=authenticate)
"""The account object for the Office 365 account."""
|
"""Base class for Office 365 tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.office365.utils import authenticate
if TYPE_CHECKING:
from O365 import Account
class O365BaseTool(BaseTool): # type: ignore[override]
"""Base class for the Office 365 tools."""
account: Account = Field(default_factory=authenticate)
"""The account object for the Office 365 account."""
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion==0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.15.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.9.10",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"optimum_quanto": "optimum_quanto>=0.2.6",
"gguf": "gguf>=0.10.0",
"torchao": "torchao>=0.7.0",
"bitsandbytes": "bitsandbytes>=0.43.3",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
"opencv-python": "opencv-python",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.15.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.9.10",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"optimum_quanto": "optimum_quanto>=0.2.6",
"gguf": "gguf>=0.10.0",
"torchao": "torchao>=0.7.0",
"bitsandbytes": "bitsandbytes>=0.43.3",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
"opencv-python": "opencv-python",
}
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_simple_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_simple_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Union[Type, str], datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
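# --- Usage sketch (annotation, not part of the original transform) ---
# A rough illustration of the expected inputs, assuming the prototype
# datapoints API imported above (Image, BoundingBoxes, Label):
#
#     crop = FixedSizeCrop(size=(512, 512), fill=0)
#     image = datapoints.Image(torch.randint(0, 256, (3, 600, 800), dtype=torch.uint8))
#     boxes = datapoints.BoundingBoxes(
#         torch.tensor([[10, 10, 200, 200]]), format="XYXY", canvas_size=(600, 800))
#     labels = Label(torch.tensor([1]))
#     image, boxes, labels = crop(image, boxes, labels)  # crop and/or pad to 512x512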
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Image Demo.
This script adopts a new inference class that currently supports image path,
np.array and folder input formats; video and webcam will be supported
in the future.
Example:
Save visualizations and predictions results::
python demo/image_demo.py demo/demo.jpg rtmdet-s
python demo/image_demo.py demo/demo.jpg \
configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \
--weights rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 --texts bench
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 --texts 'bench . car .'
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 \
--texts 'bench . car .' -c
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 \
--texts 'There are a lot of cars here.'
Visualize prediction results::
python demo/image_demo.py demo/demo.jpg rtmdet-ins-s --show
python demo/image_demo.py demo/demo.jpg rtmdet-ins_s_8xb32-300e_coco \
--show
"""
from argparse import ArgumentParser
from mmengine.logging import print_log
from mmdet.apis import DetInferencer
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'inputs', type=str, help='Input image file or folder path.')
parser.add_argument(
'model',
type=str,
        help='Config or checkpoint .pth file or the model name '
        'and alias defined in metafile. The model configuration '
        'will be read from the .pth file if the parameter is '
        'a .pth weights file.')
parser.add_argument('--weights', default=None, help='Checkpoint file')
parser.add_argument(
'--out-dir',
type=str,
default='outputs',
help='Output directory of images or prediction results.')
parser.add_argument('--texts', help='text prompt')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--pred-score-thr',
type=float,
default=0.3,
help='bbox score threshold')
parser.add_argument(
'--batch-size', type=int, default=1, help='Inference batch size.')
parser.add_argument(
'--show',
action='store_true',
help='Display the image in a popup window.')
parser.add_argument(
'--no-save-vis',
action='store_true',
help='Do not save detection vis results')
parser.add_argument(
'--no-save-pred',
action='store_true',
help='Do not save detection json results')
parser.add_argument(
'--print-result',
action='store_true',
help='Whether to print the results.')
parser.add_argument(
'--palette',
default='none',
choices=['coco', 'voc', 'citys', 'random', 'none'],
help='Color palette used for visualization')
# only for GLIP
parser.add_argument(
'--custom-entities',
'-c',
action='store_true',
        help='Whether to customize entity names. '
        'If so, the input text should be in the '
        '"cls_name1 . cls_name2 . cls_name3 ." format')
call_args = vars(parser.parse_args())
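    # When both vis and prediction saving are disabled, clear out_dir so nothing
    # is written to disk.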
if call_args['no_save_vis'] and call_args['no_save_pred']:
call_args['out_dir'] = ''
if call_args['model'].endswith('.pth'):
        print_log('The model is a weight file, automatically '
                  'assigning it to --weights')
call_args['weights'] = call_args['model']
call_args['model'] = None
init_kws = ['model', 'weights', 'device', 'palette']
init_args = {}
for init_kw in init_kws:
init_args[init_kw] = call_args.pop(init_kw)
return init_args, call_args
def main():
init_args, call_args = parse_args()
    # TODO: Video and webcam inputs are currently not supported. Inference may
    # also consume too much memory if your input folder has a lot of images.
    # This will be optimized later.
inferencer = DetInferencer(**init_args)
inferencer(**call_args)
if call_args['out_dir'] != '' and not (call_args['no_save_vis']
and call_args['no_save_pred']):
print_log(f'results have been saved at {call_args["out_dir"]}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Image Demo.
This script adopts a new inference class; it currently supports image path,
np.array and folder input formats, and will support video and webcam
in the future.
Example:
    Save visualizations and prediction results::
python demo/image_demo.py demo/demo.jpg rtmdet-s
python demo/image_demo.py demo/demo.jpg \
configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \
--weights rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth
Visualize prediction results::
python demo/image_demo.py demo/demo.jpg rtmdet-ins-s --show
python demo/image_demo.py demo/demo.jpg rtmdet-ins_s_8xb32-300e_coco \
--show
"""
from argparse import ArgumentParser
from mmengine.logging import print_log
from mmdet.apis import DetInferencer
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'inputs', type=str, help='Input image file or folder path.')
parser.add_argument(
'model',
type=str,
        help='Config or checkpoint .pth file or the model name '
        'and alias defined in metafile. The model configuration '
        'will be read from the .pth file if the parameter is '
        'a .pth weights file.')
parser.add_argument('--weights', default=None, help='Checkpoint file')
parser.add_argument(
'--out-dir',
type=str,
default='outputs',
help='Output directory of images or prediction results.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--pred-score-thr',
type=float,
default=0.3,
help='bbox score threshold')
parser.add_argument(
'--batch-size', type=int, default=1, help='Inference batch size.')
parser.add_argument(
'--show',
action='store_true',
help='Display the image in a popup window.')
parser.add_argument(
'--no-save-vis',
action='store_true',
help='Do not save detection vis results')
parser.add_argument(
'--no-save-pred',
action='store_true',
help='Do not save detection json results')
parser.add_argument(
'--print-result',
action='store_true',
help='Whether to print the results.')
parser.add_argument(
'--palette',
default='none',
choices=['coco', 'voc', 'citys', 'random', 'none'],
help='Color palette used for visualization')
call_args = vars(parser.parse_args())
if call_args['no_save_vis'] and call_args['no_save_pred']:
call_args['out_dir'] = ''
if call_args['model'].endswith('.pth'):
        print_log('The model is a weight file, automatically '
                  'assigning it to --weights')
call_args['weights'] = call_args['model']
call_args['model'] = None
init_kws = ['model', 'weights', 'device', 'palette']
init_args = {}
for init_kw in init_kws:
init_args[init_kw] = call_args.pop(init_kw)
return init_args, call_args
def main():
init_args, call_args = parse_args()
    # TODO: Video and webcam inputs are currently not supported. Inference may
    # also consume too much memory if your input folder has a lot of images.
    # This will be optimized later.
inferencer = DetInferencer(**init_args)
inferencer(**call_args)
if call_args['out_dir'] != '' and not (call_args['no_save_vis']
and call_args['no_save_pred']):
print_log(f'results have been saved at {call_args["out_dir"]}')
if __name__ == '__main__':
main()
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
import logging
import tempfile
import typing
import autogpt_libs.auth.depends
import fastapi
import fastapi.responses
import prisma.enums
import backend.server.v2.store.db
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
import backend.util.json
logger = logging.getLogger(__name__)
router = fastapi.APIRouter(prefix="/admin", tags=["store", "admin"])
@router.get(
"/listings",
response_model=backend.server.v2.store.model.StoreListingsWithVersionsResponse,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def get_admin_listings_with_versions(
status: typing.Optional[prisma.enums.SubmissionStatus] = None,
search: typing.Optional[str] = None,
page: int = 1,
page_size: int = 20,
):
"""
Get store listings with their version history for admins.
This provides a consolidated view of listings with their versions,
allowing for an expandable UI in the admin dashboard.
Args:
status: Filter by submission status (PENDING, APPROVED, REJECTED)
search: Search by name, description, or user email
page: Page number for pagination
page_size: Number of items per page
Returns:
StoreListingsWithVersionsResponse with listings and their versions
"""
try:
listings = await backend.server.v2.store.db.get_admin_listings_with_versions(
status=status,
search_query=search,
page=page,
page_size=page_size,
)
return listings
except Exception as e:
logger.exception("Error getting admin listings with versions: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={
"detail": "An error occurred while retrieving listings with versions"
},
)
@router.post(
"/submissions/{store_listing_version_id}/review",
response_model=backend.server.v2.store.model.StoreSubmission,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def review_submission(
store_listing_version_id: str,
request: backend.server.v2.store.model.ReviewSubmissionRequest,
user: typing.Annotated[
autogpt_libs.auth.models.User,
fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
],
):
"""
Review a store listing submission.
Args:
store_listing_version_id: ID of the submission to review
request: Review details including approval status and comments
user: Authenticated admin user performing the review
Returns:
StoreSubmission with updated review information
"""
try:
submission = await backend.server.v2.store.db.review_store_submission(
store_listing_version_id=store_listing_version_id,
is_approved=request.is_approved,
external_comments=request.comments,
internal_comments=request.internal_comments or "",
reviewer_id=user.user_id,
)
return submission
except Exception as e:
logger.exception("Error reviewing submission: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={"detail": "An error occurred while reviewing the submission"},
)
@router.get(
"/submissions/download/{store_listing_version_id}",
tags=["store", "admin"],
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def admin_download_agent_file(
user: typing.Annotated[
autogpt_libs.auth.models.User,
fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
],
store_listing_version_id: str = fastapi.Path(
..., description="The ID of the agent to download"
),
) -> fastapi.responses.FileResponse:
"""
Download the agent file by streaming its content.
Args:
store_listing_version_id (str): The ID of the agent to download
Returns:
        FileResponse: A JSON file response containing the agent's graph data.
Raises:
HTTPException: If the agent is not found or an unexpected error occurs.
"""
graph_data = await backend.server.v2.store.db.get_agent(
user_id=user.user_id,
store_listing_version_id=store_listing_version_id,
)
file_name = f"agent_{graph_data.id}_v{graph_data.version or 'latest'}.json"
# Sending graph as a stream (similar to marketplace v1)
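    # delete=False keeps the temporary file on disk after the `with` block exits,
    # so FileResponse can still read it when the response is actually sent.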
with tempfile.NamedTemporaryFile(
mode="w", suffix=".json", delete=False
) as tmp_file:
tmp_file.write(backend.util.json.dumps(graph_data))
tmp_file.flush()
return fastapi.responses.FileResponse(
tmp_file.name, filename=file_name, media_type="application/json"
)
|
import logging
import typing
import autogpt_libs.auth.depends
import fastapi
import fastapi.responses
import prisma.enums
import backend.server.v2.store.db
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
logger = logging.getLogger(__name__)
router = fastapi.APIRouter(prefix="/admin", tags=["store", "admin"])
@router.get(
"/listings",
response_model=backend.server.v2.store.model.StoreListingsWithVersionsResponse,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def get_admin_listings_with_versions(
status: typing.Optional[prisma.enums.SubmissionStatus] = None,
search: typing.Optional[str] = None,
page: int = 1,
page_size: int = 20,
):
"""
Get store listings with their version history for admins.
This provides a consolidated view of listings with their versions,
allowing for an expandable UI in the admin dashboard.
Args:
status: Filter by submission status (PENDING, APPROVED, REJECTED)
search: Search by name, description, or user email
page: Page number for pagination
page_size: Number of items per page
Returns:
StoreListingsWithVersionsResponse with listings and their versions
"""
try:
listings = await backend.server.v2.store.db.get_admin_listings_with_versions(
status=status,
search_query=search,
page=page,
page_size=page_size,
)
return listings
except Exception as e:
logger.exception("Error getting admin listings with versions: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={
"detail": "An error occurred while retrieving listings with versions"
},
)
@router.post(
"/submissions/{store_listing_version_id}/review",
response_model=backend.server.v2.store.model.StoreSubmission,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def review_submission(
store_listing_version_id: str,
request: backend.server.v2.store.model.ReviewSubmissionRequest,
user: typing.Annotated[
autogpt_libs.auth.models.User,
fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
],
):
"""
Review a store listing submission.
Args:
store_listing_version_id: ID of the submission to review
request: Review details including approval status and comments
user: Authenticated admin user performing the review
Returns:
StoreSubmission with updated review information
"""
try:
submission = await backend.server.v2.store.db.review_store_submission(
store_listing_version_id=store_listing_version_id,
is_approved=request.is_approved,
external_comments=request.comments,
internal_comments=request.internal_comments or "",
reviewer_id=user.user_id,
)
return submission
except Exception as e:
logger.exception("Error reviewing submission: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={"detail": "An error occurred while reviewing the submission"},
)
|
from ._transforms import (
AddNoise,
BarkScale,
BarkSpectrogram,
Convolve,
FFTConvolve,
InverseBarkScale,
Speed,
SpeedPerturbation,
)
__all__ = [
"AddNoise",
"BarkScale",
"BarkSpectrogram",
"Convolve",
"FFTConvolve",
"InverseBarkScale",
"SpeedPerturbation",
"Speed",
]
|
from ._transforms import BarkScale, BarkSpectrogram, Convolve, FFTConvolve, InverseBarkScale, Speed, SpeedPerturbation
__all__ = [
"BarkScale",
"BarkSpectrogram",
"Convolve",
"FFTConvolve",
"InverseBarkScale",
"SpeedPerturbation",
"Speed",
]
|
import pytest
from hypothesis import assume, given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(10)
parameter_strategy = strategies.fixed_dictionaries(
{
"booster": strategies.just("gblinear"),
"eta": strategies.floats(0.01, 0.25),
"tolerance": strategies.floats(1e-5, 1e-2),
"nthread": strategies.integers(1, 4),
"feature_selector": strategies.sampled_from(
["cyclic", "shuffle", "greedy", "thrifty"]
),
"top_k": strategies.integers(1, 10),
}
)
def train_result(param, dmat, num_rounds):
result = {}
booster = xgb.train(
param,
dmat,
num_rounds,
[(dmat, "train")],
verbose_eval=False,
evals_result=result,
)
assert booster.num_boosted_rounds() == num_rounds
return result
class TestGPULinear:
@given(parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy())
@settings(deadline=None, max_examples=20, print_blob=True)
def test_gpu_coordinate(self, param, num_rounds, dataset):
assume(len(dataset.y) > 0)
param["updater"] = "gpu_coord_descent"
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)["train"][
dataset.metric
]
note(result)
assert tm.non_increasing(result)
# Loss is not guaranteed to always decrease because of regularisation parameters
# We test a weaker condition that the loss has not increased between the first and last
# iteration
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
strategies.floats(1e-5, 0.8),
strategies.floats(1e-5, 0.8),
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_gpu_coordinate_regularised(self, param, num_rounds, dataset, alpha, lambd):
assume(len(dataset.y) > 0)
param["updater"] = "gpu_coord_descent"
param["alpha"] = alpha
param["lambda"] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)["train"][
dataset.metric
]
note(result)
assert tm.non_increasing([result[0], result[-1]])
@pytest.mark.skipif(**tm.no_cupy())
def test_gpu_coordinate_from_cupy(self):
# Training linear model is quite expensive, so we don't include it in
# test_from_cupy.py
import cupy
params = {
"booster": "gblinear",
"updater": "gpu_coord_descent",
"n_estimators": 100,
}
X, y = tm.get_california_housing()
cpu_model = xgb.XGBRegressor(**params)
cpu_model.fit(X, y)
cpu_predt = cpu_model.predict(X)
X = cupy.array(X)
y = cupy.array(y)
gpu_model = xgb.XGBRegressor(**params)
gpu_model.fit(X, y)
gpu_predt = gpu_model.predict(X)
cupy.testing.assert_allclose(cpu_predt, gpu_predt)
|
import pytest
from hypothesis import assume, given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(10)
parameter_strategy = strategies.fixed_dictionaries({
'booster': strategies.just('gblinear'),
'eta': strategies.floats(0.01, 0.25),
'tolerance': strategies.floats(1e-5, 1e-2),
'nthread': strategies.integers(1, 4),
'feature_selector': strategies.sampled_from(['cyclic', 'shuffle',
'greedy', 'thrifty']),
'top_k': strategies.integers(1, 10),
})
def train_result(param, dmat, num_rounds):
result = {}
booster = xgb.train(
param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
evals_result=result
)
assert booster.num_boosted_rounds() == num_rounds
return result
class TestGPULinear:
@given(parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy())
@settings(deadline=None, max_examples=20, print_blob=True)
def test_gpu_coordinate(self, param, num_rounds, dataset):
assume(len(dataset.y) > 0)
param['updater'] = 'gpu_coord_descent'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result)
# Loss is not guaranteed to always decrease because of regularisation parameters
# We test a weaker condition that the loss has not increased between the first and last
# iteration
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
strategies.floats(1e-5, 0.8),
strategies.floats(1e-5, 0.8)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_gpu_coordinate_regularised(self, param, num_rounds, dataset, alpha, lambd):
assume(len(dataset.y) > 0)
param['updater'] = 'gpu_coord_descent'
param['alpha'] = alpha
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
@pytest.mark.skipif(**tm.no_cupy())
def test_gpu_coordinate_from_cupy(self):
# Training linear model is quite expensive, so we don't include it in
# test_from_cupy.py
import cupy
params = {'booster': 'gblinear', 'updater': 'gpu_coord_descent',
'n_estimators': 100}
X, y = tm.get_california_housing()
cpu_model = xgb.XGBRegressor(**params)
cpu_model.fit(X, y)
cpu_predt = cpu_model.predict(X)
X = cupy.array(X)
y = cupy.array(y)
gpu_model = xgb.XGBRegressor(**params)
gpu_model.fit(X, y)
gpu_predt = gpu_model.predict(X)
cupy.testing.assert_allclose(cpu_predt, gpu_predt)
|
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity = get_current_verbosity()
assert old_verbosity == 1
with xgb.config_context(verbosity=verbosity_level):
new_verbosity = get_current_verbosity()
assert new_verbosity == verbosity_level
assert old_verbosity == get_current_verbosity()
@pytest.mark.parametrize("use_rmm", [False, True])
def test_global_config_use_rmm(use_rmm):
def get_current_use_rmm_flag():
return xgb.get_config()["use_rmm"]
old_use_rmm_flag = get_current_use_rmm_flag()
with xgb.config_context(use_rmm=use_rmm):
new_use_rmm_flag = get_current_use_rmm_flag()
assert new_use_rmm_flag == use_rmm
assert old_use_rmm_flag == get_current_use_rmm_flag()
def test_nested_config() -> None:
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=2):
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=1):
assert xgb.get_config()["verbosity"] == 1
assert xgb.get_config()["verbosity"] == 2
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=None):
assert xgb.get_config()["verbosity"] == 3 # None has no effect
xgb.set_config(verbosity=2)
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
xgb.set_config(verbosity=verbosity) # reset
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
def test_thread_safty():
n_threads = multiprocessing.cpu_count()
futures = []
with ThreadPoolExecutor(max_workers=n_threads) as executor:
for i in range(256):
f = executor.submit(test_nested_config)
futures.append(f)
for f in futures:
f.result()
|
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity = get_current_verbosity()
with xgb.config_context(verbosity=verbosity_level):
new_verbosity = get_current_verbosity()
assert new_verbosity == verbosity_level
assert old_verbosity == get_current_verbosity()
@pytest.mark.parametrize("use_rmm", [False, True])
def test_global_config_use_rmm(use_rmm):
def get_current_use_rmm_flag():
return xgb.get_config()["use_rmm"]
old_use_rmm_flag = get_current_use_rmm_flag()
with xgb.config_context(use_rmm=use_rmm):
new_use_rmm_flag = get_current_use_rmm_flag()
assert new_use_rmm_flag == use_rmm
assert old_use_rmm_flag == get_current_use_rmm_flag()
def test_nested_config():
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=2):
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=1):
assert xgb.get_config()["verbosity"] == 1
assert xgb.get_config()["verbosity"] == 2
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=None):
assert xgb.get_config()["verbosity"] == 3 # None has no effect
verbosity = xgb.get_config()["verbosity"]
xgb.set_config(verbosity=2)
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
xgb.set_config(verbosity=verbosity) # reset
def test_thread_safty():
n_threads = multiprocessing.cpu_count()
futures = []
with ThreadPoolExecutor(max_workers=n_threads) as executor:
for i in range(256):
f = executor.submit(test_nested_config)
futures.append(f)
for f in futures:
f.result()
|
from typing import Type
from docarray.proto import DocumentArrayProto, NodeProto
from ..abstract_array import AbstractDocumentArray
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
return cls(cls.document_type.from_protobuf(od) for od in pb_msg.docs)
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``,
if set it will force all ndarray-like object from all
Documents to ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
dap = DocumentArrayProto()
for doc in self:
dap.docs.append(doc.to_protobuf())
return dap
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert a DocumentArray into a nested item protobuf message.
This function should be called when a DocumentArray
        is nested into another Document that needs to be converted into a protobuf message
:return: the nested item protobuf message
"""
return NodeProto(chunks=self.to_protobuf())
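# A minimal round-trip sketch (hypothetical: `MyDocArray` stands in for any concrete
# DocumentArray class that includes ProtoArrayMixin):
#
#     proto = my_array.to_protobuf()
#     restored = MyDocArray.from_protobuf(proto)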
|
from typing import Type
from docarray.proto import DocumentArrayProto, NodeProto
from ..abstract_array import AbstractDocumentArray
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
return cls(cls.document_type.from_protobuf(od) for od in pb_msg.docs)
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``, if set it will force all ndarray-like object from all
Documents to ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
dap = DocumentArrayProto()
for doc in self:
dap.docs.append(doc.to_protobuf())
return dap
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert a DocumentArray into a nested item protobuf message. This function should be called when a DocumentArray
        is nested into another Document that needs to be converted into a protobuf message
:return: the nested item protobuf message
"""
return NodeProto(chunks=self.to_protobuf())
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Query Sparsity: Active Dimensions: 43.1, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 207.0, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Sparsity Stats Query : Row Non-Zero Mean: 43.08049392700195, Row Sparsity Mean: 0.9985886216163635
Model Sparsity Stats Corpus : Row Non-Zero Mean: 206.8623504638672, Row Sparsity Mean: 0.9932224750518799
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
def test_tool_usage() -> None:
parser = XMLAgentOutputParser()
# Test when final closing </tool_input> is included
_input = """<tool>search</tool><tool_input>foo</tool_input>"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
assert output == expected_output
# Test when final closing </tool_input> is NOT included
# This happens when it's used as a stop token
_input = """<tool>search</tool><tool_input>foo</tool_input>"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
assert output == expected_output
def test_finish() -> None:
parser = XMLAgentOutputParser()
# Test when final closing <final_answer> is included
_input = """<final_answer>bar</final_answer>"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
assert output == expected_output
# Test when final closing <final_answer> is NOT included
# This happens when it's used as a stop token
_input = """<final_answer>bar</final_answer>"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
assert output == expected_output
def test_malformed_xml_with_nested_tags() -> None:
"""Test handling of tool names with XML tags via format_xml minimal escaping."""
from langchain.agents.format_scratchpad.xml import format_xml
# Create an AgentAction with XML tags in the tool name
action = AgentAction(tool="search<tool>nested</tool>", tool_input="query", log="")
# The format_xml function should escape the XML tags using custom delimiters
formatted_xml = format_xml([(action, "observation")])
# Extract just the tool part for parsing
tool_part = formatted_xml.split("<observation>")[0] # Remove observation part
# Now test that the parser can handle the escaped XML
parser = XMLAgentOutputParser(escape_format="minimal")
output = parser.invoke(tool_part)
# The parser should unescape and extract the original tool name
expected_output = AgentAction(
tool="search<tool>nested</tool>", tool_input="query", log=tool_part
)
assert output == expected_output
def test_no_escaping() -> None:
"""Test parser with escaping disabled."""
parser = XMLAgentOutputParser(escape_format=None)
# Test with regular tool name (no XML tags)
_input = """<tool>search</tool><tool_input>foo</tool_input>"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
assert output == expected_output
|
from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
def test_tool_usage() -> None:
parser = XMLAgentOutputParser()
# Test when final closing </tool_input> is included
_input = """<tool>search</tool><tool_input>foo</tool_input>"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
assert output == expected_output
# Test when final closing </tool_input> is NOT included
# This happens when it's used as a stop token
_input = """<tool>search</tool><tool_input>foo</tool_input>"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
assert output == expected_output
def test_finish() -> None:
parser = XMLAgentOutputParser()
# Test when final closing <final_answer> is included
_input = """<final_answer>bar</final_answer>"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
assert output == expected_output
# Test when final closing <final_answer> is NOT included
# This happens when it's used as a stop token
_input = """<final_answer>bar</final_answer>"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
assert output == expected_output
|
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: list[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return self.merge_documents(query, run_manager)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return await self.amerge_documents(query, run_manager)
def merge_documents(
self, query: str, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
)
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
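# A minimal usage sketch (the two retriever variables below are hypothetical; any
# BaseRetriever instances work):
#
#     merger = MergerRetriever(retrievers=[keyword_retriever, vector_retriever])
#     docs = merger.invoke("my query")  # results are interleaved round-robin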
|
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: list[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = self.merge_documents(query, run_manager)
return merged_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = await self.amerge_documents(query, run_manager)
return merged_documents
def merge_documents(
self, query: str, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
)
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
img_norm_cfg = dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config, ci_hfh_hf_hub_url) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""").encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
""").encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(
temporary_repo, hf_api, hf_token, csv_path, tmp_path, ci_hub_config, ci_hfh_hf_hub_url
) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
readme_path = tmp_path / "README.md"
readme_path.write_text(
dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""")
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(readme_path),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=b"---\nconfigs:\n- config_name: cats\n data_files:\n - split: train\n path: cats/train/*\n---\n",
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
def test_default_format_bundle():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
bundle = dict(type='DefaultFormatBundle')
bundle = build_from_cfg(bundle, PIPELINES)
results = load(results)
assert 'pad_shape' not in results
assert 'scale_factor' not in results
assert 'img_norm_cfg' not in results
results = bundle(results)
assert 'pad_shape' in results
assert 'scale_factor' in results
assert 'img_norm_cfg' in results
|
import os.path as osp
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
def test_default_format_bundle():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
bundle = dict(type='DefaultFormatBundle')
bundle = build_from_cfg(bundle, PIPELINES)
results = load(results)
assert 'pad_shape' not in results
assert 'scale_factor' not in results
assert 'img_norm_cfg' not in results
results = bundle(results)
assert 'pad_shape' in results
assert 'scale_factor' in results
assert 'img_norm_cfg' in results
|
import os
import sys
import pytest
from llama_index.core.evaluation.eval_utils import upload_eval_dataset
base_url = os.environ.get("LLAMA_CLOUD_BASE_URL", None)
api_key = os.environ.get("LLAMA_CLOUD_API_KEY", None)
python_version = sys.version
@pytest.mark.skipif(
    not base_url or not api_key, reason="No platform base url or api key set"
)
@pytest.mark.integration
def test_upload_eval_dataset() -> None:
from llama_cloud.client import LlamaCloud
eval_dataset_id = upload_eval_dataset(
"test_dataset" + python_version, # avoid CI test clashes
project_name="test_project" + python_version,
questions=["foo", "bar"],
overwrite=True,
)
client = LlamaCloud(base_url=base_url, token=api_key)
eval_dataset = client.evals.get_dataset(dataset_id=eval_dataset_id)
assert eval_dataset.name == "test_dataset" + python_version
eval_questions = client.evals.get_questions(dataset_id=eval_dataset_id)
assert len(eval_questions) == 2
|
import os
import sys
import pytest
from llama_index.core.evaluation.eval_utils import upload_eval_dataset
base_url = os.environ.get("LLAMA_CLOUD_BASE_URL", None)
api_key = os.environ.get("LLAMA_CLOUD_API_KEY", None)
python_version = sys.version
@pytest.mark.skipif(
    not base_url or not api_key, reason="No platform base url or api key set"
)
@pytest.mark.integration()
def test_upload_eval_dataset() -> None:
from llama_cloud.client import LlamaCloud
eval_dataset_id = upload_eval_dataset(
"test_dataset" + python_version, # avoid CI test clashes
project_name="test_project" + python_version,
questions=["foo", "bar"],
overwrite=True,
)
client = LlamaCloud(base_url=base_url, token=api_key)
eval_dataset = client.evals.get_dataset(dataset_id=eval_dataset_id)
assert eval_dataset.name == "test_dataset" + python_version
eval_questions = client.evals.get_questions(dataset_id=eval_dataset_id)
assert len(eval_questions) == 2
|
import abc
from platform import architecture, python_version
from typing import Any, Optional
from importlib.metadata import version
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.oxylabs.utils import json_to_markdown
from oxylabs import RealtimeClient, AsyncClient
from oxylabs.sources.response import Response
class OxylabsBaseReader(BasePydanticReader, abc.ABC):
"""
Oxylabs Scraper base class.
https://developers.oxylabs.io/scraper-apis/web-scraper-api
"""
top_level_header: Optional[str] = None
timeout_s: int = 100
oxylabs_scraper_url: str = "https://realtime.oxyserps-dev.fun/v1/queries"
oxylabs_api: RealtimeClient
async_oxylabs_api: AsyncClient
def __init__(self, username: str, password: str, **data) -> None:
bits, _ = architecture()
sdk_type = (
f"oxylabs-llama-index-oxy-sdk-python/"
f"{version('llama-index-readers-oxylabs')} "
f"({python_version()}; {bits})"
)
data["oxylabs_api"] = RealtimeClient(username, password, sdk_type=sdk_type)
data["async_oxylabs_api"] = AsyncClient(username, password, sdk_type=sdk_type)
super().__init__(**data)
def _get_document_from_response(
self, response: list[dict] | list[list[dict]]
) -> Document:
processed_content = json_to_markdown(response, 0, self.top_level_header)
return Document(text=processed_content)
def load_data(self, payload: dict[str, Any]) -> list[Document]:
response = self.get_response(payload)
validated_responses = self._validate_response(response)
return [self._get_document_from_response(validated_responses)]
async def aload_data(self, payload: dict[str, Any]) -> list[Document]:
response = await self.aget_response(payload)
validated_responses = self._validate_response(response)
return [self._get_document_from_response(validated_responses)]
def get_response(self, payload: dict[str, Any]) -> Response:
raise NotImplementedError
async def aget_response(self, payload: dict[str, Any]) -> Response:
raise NotImplementedError
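    # Concrete readers are expected to override get_response / aget_response with a
    # call into the Oxylabs SDK via self.oxylabs_api / self.async_oxylabs_api; the
    # exact method depends on the target source, so none is assumed here.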
@staticmethod
def _validate_response(
response: Any,
) -> list[dict[Any, Any]] | list[list[dict[Any, Any]]]:
"""
Validate Oxylabs response format and unpack data.
"""
validated_results = []
try:
result_pages = response.raw["results"]
if not isinstance(result_pages, list) or not result_pages:
raise ValueError("No results returned!")
for result_page in result_pages:
result_page = dict(result_page)
content = result_page["content"]
if isinstance(content, list):
validated_results.append(content)
continue
if not isinstance(content, dict):
raise ValueError(
"Result `content` format error,"
" try setting parameter `parse` to True"
)
if "results" in content:
result = content["results"]
if isinstance(result, list):
validated_results.append(result)
elif isinstance(result, dict):
validated_results.append(result)
else:
raise ValueError("Response format Error!")
else:
validated_results.append(content)
return validated_results
except (KeyError, IndexError, TypeError, ValueError) as exc:
raise RuntimeError(f"Response Validation Error: {exc!s}") from exc
|
import abc
from platform import architecture, python_version
from typing import Any
from importlib.metadata import version
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.oxylabs.utils import json_to_markdown
from oxylabs import RealtimeClient, AsyncClient
from oxylabs.sources.response import Response
class OxylabsBaseReader(BasePydanticReader, abc.ABC):
"""
Oxylabs Scraper base class.
https://developers.oxylabs.io/scraper-apis/web-scraper-api
"""
top_level_header: str | None = None
timeout_s: int = 100
oxylabs_scraper_url: str = "https://realtime.oxyserps-dev.fun/v1/queries"
oxylabs_api: RealtimeClient
async_oxylabs_api: AsyncClient
def __init__(self, username: str, password: str, **data) -> None:
bits, _ = architecture()
sdk_type = (
f"oxylabs-llama-index-oxy-sdk-python/"
f"{version('llama-index-readers-oxylabs')} "
f"({python_version()}; {bits})"
)
data["oxylabs_api"] = RealtimeClient(username, password, sdk_type=sdk_type)
data["async_oxylabs_api"] = AsyncClient(username, password, sdk_type=sdk_type)
super().__init__(**data)
def _get_document_from_response(
self, response: list[dict] | list[list[dict]]
) -> Document:
processed_content = json_to_markdown(response, 0, self.top_level_header)
return Document(text=processed_content)
def load_data(self, payload: dict[str, Any]) -> list[Document]:
response = self.get_response(payload)
validated_responses = self._validate_response(response)
return [self._get_document_from_response(validated_responses)]
async def aload_data(self, payload: dict[str, Any]) -> list[Document]:
response = await self.aget_response(payload)
validated_responses = self._validate_response(response)
return [self._get_document_from_response(validated_responses)]
def get_response(self, payload: dict[str, Any]) -> Response:
raise NotImplementedError
async def aget_response(self, payload: dict[str, Any]) -> Response:
raise NotImplementedError
@staticmethod
def _validate_response(
response: Any,
) -> list[dict[Any, Any]] | list[list[dict[Any, Any]]]:
"""
Validate Oxylabs response format and unpack data.
"""
validated_results = []
try:
result_pages = response.raw["results"]
if not isinstance(result_pages, list) or not result_pages:
raise ValueError("No results returned!")
for result_page in result_pages:
result_page = dict(result_page)
content = result_page["content"]
if isinstance(content, list):
validated_results.append(content)
continue
if not isinstance(content, dict):
raise ValueError(
"Result `content` format error,"
" try setting parameter `parse` to True"
)
if "results" in content:
result = content["results"]
if isinstance(result, list):
validated_results.append(result)
elif isinstance(result, dict):
validated_results.append(result)
else:
raise ValueError("Response format Error!")
else:
validated_results.append(content)
return validated_results
except (KeyError, IndexError, TypeError, ValueError) as exc:
raise RuntimeError(f"Response Validation Error: {exc!s}") from exc
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear'
]
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist())
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
        if categories is not None and len(categories) != one_hot_label.shape[-1]:
            raise ValueError(
                f"Expected last dimension of size {len(categories)}, got {one_hot_label.shape[-1]}."
            )
return one_hot_label
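# Minimal usage sketch for the prototype datapoints above (illustrative only):
#
#   categories = ["cat", "dog", "bird"]
#   label = Label.from_category("dog", categories=categories)  # wraps tensor(1)
#   label.to_categories()                                       # -> "dog"
#   batch = Label([0, 2, 1], categories=categories)
#   batch.to_categories()                                       # -> ["cat", "bird", "dog"]
#   one_hot = OneHotLabel([[0, 1, 0]], categories=categories)   # last dim must equal len(categories)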
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def wrap_like(cls: Type[L], other: L, tensor: torch.Tensor, *, categories: Optional[Sequence[str]] = None) -> L:
return cls._wrap(
tensor,
categories=categories if categories is not None else other.categories,
)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist())
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
        if categories is not None and len(categories) != one_hot_label.shape[-1]:
            raise ValueError(
                f"Expected last dimension of size {len(categories)}, got {one_hot_label.shape[-1]}."
            )
return one_hot_label
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.ops import convert_to_tensor
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
def test_tensor_as_vocab(self):
vocab = convert_to_tensor(["a", "b", "c", "d"])
data = [["a", "c", "d"], ["d", "z", "b"]]
layer = layers.StringLookup(
vocabulary=vocab,
)
output = layer(data)
self.assertAllClose(output, np.array([[1, 3, 4], [4, 0, 2]]))
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.ops import convert_to_tensor
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
def test_tensor_as_vocab(self):
vocab = convert_to_tensor(["a", "b", "c", "d"])
data = [["a", "c", "d"], ["d", "z", "b"]]
layer = layers.StringLookup(
vocabulary=vocab,
)
output = layer(data)
self.assertAllClose(output, np.array([[1, 3, 4], [4, 0, 2]]))
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
# MMEngine supports the following two ways; users can choose
# according to convenience.
# optim_wrapper = dict(type='AmpOptimWrapper')
_base_.optim_wrapper.type = 'AmpOptimWrapper'
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
|
import os
import time
import pytest
from docarray import Document
from jina import Flow, __cache_path__
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def filewriter_exec_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'filewriter-exec/'), tag='filewriter-exec'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize(
'source,destination,workspace',
[('test/dir', '/custom/app', '/custom/app')],
)
def test_volumes_in_flow(
tmpdir, source, destination, workspace, filewriter_exec_docker_image_built
):
if source: # test manually set volume and workspace
source = os.path.join(tmpdir, source)
volumes = [str(source) + ':' + destination]
    else:  # test auto volume and workspace
        volumes = None
        source = __cache_path__
f = Flow().add(
uses='docker://filewriter-exec', volumes=volumes, workspace=workspace
)
with f:
f.post(inputs=[Document()], on='/foo')
assert os.path.exists(source)
found_output_file = False # workspace has random element, so we search for it
for cur_path, dirs, files in os.walk(source):
if 'out.txt' in files:
with open(os.path.join(cur_path, 'out.txt'), 'r') as f:
if f.read() == 'Filewriter was here':
found_output_file = True
assert found_output_file
|
import os
import time
from unittest import mock
import pytest
from docarray import Document, DocumentArray
from jina import Executor, Flow, requests
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def filewriter_exec_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'filewriter-exec/'), tag='filewriter-exec'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize(
'source,destination,workspace',
[('test/dir', '/custom/app', '/custom/app'), (None, None, '/app')],
)
def test_volumes_in_flow(
tmpdir, source, destination, workspace, filewriter_exec_docker_image_built
):
with mock.patch.dict(
os.environ,
{'JINA_DEFAULT_WORKSPACE_BASE': str(os.path.join(tmpdir, 'default'))},
):
if source: # test manually set volume and workspace
source = os.path.join(tmpdir, source)
volumes = [str(source) + ':' + destination]
else: # test auto volume and workspace
volumes = None
source = os.path.join(tmpdir, 'default')
f = Flow().add(
uses='docker://filewriter-exec', volumes=volumes, workspace=workspace
)
with f:
f.post(inputs=[Document()], on='/foo')
assert os.path.exists(source)
found_output_file = False # workspace has random element, so we search for it
for cur_path, dirs, files in os.walk(source):
if 'out.txt' in files:
with open(os.path.join(cur_path, 'out.txt'), 'r') as f:
if f.read() == 'Filewriter was here':
found_output_file = True
assert found_output_file
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = [
'../common/mstrain_3x_coco_instance.py',
'../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = [
'../common/mstrain_3x_coco_instance.py',
'../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss', 'EQLV2Loss', 'MarginL2Loss',
'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss', 'EQLV2Loss', 'MarginL2Loss', 'MultiPosCrossEntropyLoss',
'L2Loss', 'TripletLoss'
]
|
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..core import DataIter, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
def check_invalid_cat_batches(device: str) -> None:
"""Check error message for inconsistent feature types."""
class _InvalidCatIter(DataIter):
def __init__(self) -> None:
super().__init__(cache_prefix=None)
self._it = 0
def next(self, input_data: Callable) -> bool:
if self._it == 2:
return False
X, y = tm.make_categorical(
64,
12,
4,
onehot=False,
sparsity=0.5,
cat_ratio=1.0 if self._it == 0 else 0.5,
)
if device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
it = _InvalidCatIter()
import pytest
with pytest.raises(ValueError, match="Inconsistent feature types between batches"):
ExtMemQuantileDMatrix(it, enable_categorical=True)
class CatIter(DataIter): # pylint: disable=too-many-instance-attributes
"""An iterator for testing categorical features."""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
n_samples_per_batch: int,
n_features: int,
*,
n_batches: int,
n_cats: int,
sparsity: float,
cat_ratio: float,
onehot: bool,
device: str,
cache: Optional[str],
) -> None:
super().__init__(cache_prefix=cache)
self.n_batches = n_batches
self.device = device
n_samples = n_samples_per_batch * n_batches
cat, y = tm.make_categorical(
n_samples,
n_features,
n_categories=n_cats,
onehot=onehot,
cat_ratio=cat_ratio,
sparsity=sparsity,
)
xs, ys = [], []
prev = 0
for _ in range(n_batches):
n = min(n_samples_per_batch, n_samples - prev)
X = cat.iloc[prev : prev + n, :]
xs.append(X)
ys.append(y[prev : prev + n])
prev += n_samples_per_batch
self.xs = xs
self.ys = ys
self.x = cat
self.y = y
self._it = 0
def xy(self) -> tuple:
"""Return the concatenated data."""
return self.x, self.y
def next(self, input_data: Callable) -> bool:
if self._it == self.n_batches:
return False
X, y = self.xs[self._it], self.ys[self._it]
if self.device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
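# Usage sketch (an assumption about how these helpers are typically combined in
# the test suite; argument values are illustrative):
#
#   it = CatIter(
#       n_samples_per_batch=128, n_features=8, n_batches=4, n_cats=5,
#       sparsity=0.3, cat_ratio=0.5, onehot=False, device="cpu", cache="cache",
#   )
#   Xy = ExtMemQuantileDMatrix(it, enable_categorical=True)
#   X, y = it.xy()  # concatenated reference data for checking the result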
|
"""Tests related to the `DataIter` interface."""
import numpy as np
import xgboost
from xgboost import testing as tm
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = xgboost.QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = xgboost.QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
|
from __future__ import annotations
import random
import pytest
import torch
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
assert len(batches) == 18
assert len(sum(batches, [])) == 34
|
from __future__ import annotations
import random
import pytest
import torch
from datasets import Dataset
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
assert len(batches) == 18
assert len(sum(batches, [])) == 34
|
from typing import Any, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def transform(self, inpt: PIL.Image.Image, params: dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""Convert a tensor or an ndarray to PIL Image
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""Convert all TVTensors to pure tensors, removing associated metadata (if any).
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def transform(self, inpt: Any, params: dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
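# Minimal usage sketch (illustrative only; the 4x4 RGB image is synthesized
# here purely for demonstration):
#
#   img = PIL.Image.new("RGB", (4, 4))
#   tensor_img = ToImage()(img)           # tv_tensors.Image of shape (3, 4, 4), dtype uint8
#   pil_again = ToPILImage()(tensor_img)  # back to a PIL.Image.Image
#   plain = ToPureTensor()(tensor_img)    # plain torch.Tensor without TVTensor metadata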
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""Convert a tensor or an ndarray to PIL Image
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""Convert all TVTensors to pure tensors, removing associated metadata (if any).
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedditSearchAPIWrapper": "langchain_community.utilities.reddit_search",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedditSearchAPIWrapper",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedditSearchAPIWrapper": "langchain_community.utilities.reddit_search"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedditSearchAPIWrapper",
]
|
import logging
from typing import Dict, Sequence
from octoai.text_gen import ChatMessage as OctoAIChatMessage
from llama_index.core.base.llms.types import ChatMessage
TEXT_MODELS: Dict[str, int] = {
"codellama-13b-instruct": 16384,
"codellama-34b-instruct": 16384,
"codellama-7b-instruct": 4096,
"meta-llama-3-8b-instruct": 8192,
"meta-llama-3-70b-instruct": 8192,
"llama-2-13b-chat": 4096,
"llama-2-70b-chat": 4096,
"mistral-7b-instruct": 32768,
"mixtral-8x7b-instruct": 32768,
"mixtral-8x22b-instruct": 65536,
"mixtral-8x22b-finetuned": 65536,
"nous-hermes-2-mixtral-8x7b-dpo": 32768,
"hermes-2-pro-mistral-7b": 32768,
"llamaguard-7b": 4096,
"qwen1.5-32b-chat": 32768,
}
ALL_AVAILABLE_MODELS = {**TEXT_MODELS}
MISSING_TOKEN_ERROR_MESSAGE = """No token found for OctoAI.
Please set the OCTOAI_TOKEN environment \
variable prior to initialization.
API keys can be found or created at \
https://octoai.cloud/settings
"""
logger = logging.getLogger(__name__)
def octoai_modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Examples:
.. code-block:: python
max_tokens = octoai.modelname_to_contextsize(TextModel.CODELLAMA_13B_INSTRUCT)
max_tokens = octoai.modelname_to_contextsize("llama-2-13b-chat")
"""
if modelname not in ALL_AVAILABLE_MODELS:
print(
"WARNING: Model not found in octoai.utils.py, returning a generous default value."
)
return 8192
return ALL_AVAILABLE_MODELS[modelname]
def to_octoai_messages(messages: Sequence[ChatMessage]) -> Sequence[OctoAIChatMessage]:
return [
OctoAIChatMessage(content=message.content, role=message.role.value)
for message in messages
]
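# Usage sketch (illustrative; assumes llama-index `ChatMessage` objects whose
# role exposes `.value`, as used by the conversion helper above):
#
#   messages = [ChatMessage(role="user", content="Hello")]
#   octoai_messages = to_octoai_messages(messages)
#   # -> [OctoAIChatMessage(role="user", content="Hello")]
#   octoai_modelname_to_contextsize("mistral-7b-instruct")  # -> 32768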
|
import logging
from typing import Dict, Sequence
from octoai.text_gen import ChatMessage as OctoAIChatMessage
from llama_index.core.base.llms.types import ChatMessage
TEXT_MODELS: Dict[str, int] = {
"codellama-13b-instruct": 16384,
"codellama-34b-instruct": 16384,
"codellama-7b-instruct": 4096,
"meta-llama-3-8b-instruct": 8192,
"meta-llama-3-70b-instruct": 8192,
"llama-2-13b-chat": 4096,
"llama-2-70b-chat": 4096,
"mistral-7b-instruct": 32768,
"mixtral-8x7b-instruct": 32768,
"mixtral-8x22b-instruct": 65536,
"mixtral-8x22b-finetuned": 65536,
"nous-hermes-2-mixtral-8x7b-dpo": 32768,
"hermes-2-pro-mistral-7b": 32768,
"llamaguard-7b": 4096,
"qwen1.5-32b-chat": 32768,
}
ALL_AVAILABLE_MODELS = {**TEXT_MODELS}
MISSING_TOKEN_ERROR_MESSAGE = """No token found for OctoAI.
Please set the OCTOAI_TOKEN environment \
variable prior to initialization.
API keys can be found or created at \
https://octoai.cloud/settings
"""
logger = logging.getLogger(__name__)
def octoai_modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Examples:
.. code-block:: python
max_tokens = octoai.modelname_to_contextsize(TextModel.CODELLAMA_13B_INSTRUCT)
max_tokens = octoai.modelname_to_contextsize("llama-2-13b-chat")
"""
if modelname not in ALL_AVAILABLE_MODELS:
print(
"WARNING: Model not found in octoai.utils.py, returning a generous default value."
)
return 8192
return ALL_AVAILABLE_MODELS[modelname]
def to_octoai_messages(messages: Sequence[ChatMessage]) -> Sequence[OctoAIChatMessage]:
return [
OctoAIChatMessage(content=message.content, role=message.role.value)
for message in messages
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
    During distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner) -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.train_loop.dataloader, 'sampler') and hasattr(
runner.train_loop.dataloader.sampler, 'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no sampler,
            # or the data loader uses `SequentialSampler` in PyTorch.
runner.train_loop.dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.train_loop.dataloader,
'batch_sampler') and hasattr(
runner.train_loop.dataloader.batch_sampler.sampler,
'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no batch sampler.
            # The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch(
runner.epoch)
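# Background sketch in plain PyTorch (not MMEngine): `DistributedSampler` only
# reshuffles when its epoch counter changes, which is exactly the call this
# hook automates once per training epoch. Variable names below are illustrative.
#
#   sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
#   loader = torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=2)
#   for epoch in range(num_epochs):
#       sampler.set_epoch(epoch)  # without this, every epoch sees the same order
#       for batch in loader:
#           ...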
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
    During distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.train_loop.dataloader, 'sampler') and hasattr(
runner.train_loop.dataloader.sampler, 'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no sampler,
            # or the data loader uses `SequentialSampler` in PyTorch.
runner.train_loop.dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.train_loop.dataloader,
'batch_sampler') and hasattr(
runner.train_loop.dataloader.batch_sampler.sampler,
'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no batch sampler.
            # The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch(
runner.epoch)
|
import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subindices', None):
for selector, da in self._subindices.items():
value = DocumentArray(value)
if getattr(da, '_config', None) and da._config.root_id:
for v in value:
for doc in DocumentArray(v)[selector]:
doc.tags['_root_id_'] = v.id
docs_selector = value[selector]
if len(docs_selector) > 0:
da.extend(docs_selector)
def insert(self, index: int, value: 'Document', **kwargs):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
:param kwargs: Additional Arguments that are passed to the Document Store. This has no effect for in-memory DocumentArray.
"""
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
        :param value: The doc to be appended.
"""
self._append(value, **kwargs)
self._update_subindices_append_extend(value)
def _append(self, value, **kwargs):
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
...
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
from docarray.helper import check_root_id
if getattr(self, '_is_subindex', None):
check_root_id(self, values)
self._extend(values, **kwargs)
self._update_subindices_append_extend(values)
def _extend(self, values, **kwargs):
for value in values:
self._append(value, **kwargs)
|
import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subindices', None):
for selector, da in self._subindices.items():
value = DocumentArray(value)
if getattr(da, '_config', None) and da._config.root_id:
for v in value:
for doc in DocumentArray(v)[selector]:
doc.tags['_root_id_'] = v.id
docs_selector = value[selector]
if len(docs_selector) > 0:
da.extend(docs_selector)
def insert(self, index: int, value: 'Document', **kwargs):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
:param kwargs: Additional Arguments that are passed to the Document Store. This has no effect for in-memory DocumentArray.
"""
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
        :param value: The doc to be appended.
"""
self._append(value, **kwargs)
self._update_subindices_append_extend(value)
def _append(self, value, **kwargs):
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
...
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
from docarray.helper import check_root_id
if self._is_subindex:
check_root_id(self, values)
self._extend(values, **kwargs)
self._update_subindices_append_extend(values)
def _extend(self, values, **kwargs):
for value in values:
self._append(value, **kwargs)
|
from llama_index.core.extractors.interface import BaseExtractor
from llama_index.core.extractors.metadata_extractors import (
KeywordExtractor,
PydanticProgramExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
TitleExtractor,
)
from llama_index.core.extractors.document_context import DocumentContextExtractor
__all__ = [
"SummaryExtractor",
"QuestionsAnsweredExtractor",
"TitleExtractor",
"KeywordExtractor",
"BaseExtractor",
"PydanticProgramExtractor",
"DocumentContextExtractor",
]
|
from llama_index.core.extractors.interface import BaseExtractor
from llama_index.core.extractors.metadata_extractors import (
KeywordExtractor,
PydanticProgramExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
TitleExtractor,
)
__all__ = [
"SummaryExtractor",
"QuestionsAnsweredExtractor",
"TitleExtractor",
"KeywordExtractor",
"BaseExtractor",
"PydanticProgramExtractor",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead', 'SOLOV2Head', 'DDODHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'DDODHead', 'Mask2FormerHead'
]
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Lion"])
class Lion(optimizer.Optimizer):
"""Optimizer that implements the Lion algorithm.
The Lion optimizer is a stochastic-gradient-descent method that uses the
sign operator to control the magnitude of the update, unlike other adaptive
optimizers such as Adam that rely on second-order moments. This makes
Lion more memory-efficient as it only keeps track of the momentum. According
to the authors (see reference), its performance gain over Adam grows with
the batch size. Because the update of Lion is produced through the sign
operation, resulting in a larger norm, a suitable learning rate for Lion is
typically 3-10x smaller than that for AdamW. The weight decay for Lion
should in turn be 3-10x larger than that for AdamW to maintain a
similar strength (lr * wd).
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
rate to combine the current gradient and the 1st moment estimate.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimate. Defaults to
`0.99`.
{{base_optimizer_keyword_args}}
References:
- [Chen et al., 2023](http://arxiv.org/abs/2302.06675)
- [Authors' implementation](
http://github.com/google/automl/tree/master/lion)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.99,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="lion",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
if beta_1 <= 0 or beta_1 > 1:
raise ValueError(
"Argument `beta_1` must be in the [0, 1] range. Otherwise, the "
f"optimizer degenerates to SignSGD. Received: beta_1={beta_1}."
)
def build(self, var_list):
"""Initialize optimizer variables.
Lion optimizer has one variable `momentums`.
Args:
var_list: list of model variables to build Lion variables on.
"""
if self.built:
return
super().build(var_list)
self._momentums = self.add_optimizer_variables(var_list, "momentum")
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
beta_1 = ops.cast(self.beta_1, variable.dtype)
beta_2 = ops.cast(self.beta_2, variable.dtype)
m = self._momentums[self._get_variable_index(variable)]
self.assign_sub(
variable,
ops.multiply(
lr,
ops.sign(
ops.add(
ops.multiply(m, beta_1),
ops.multiply(gradient, (1.0 - beta_1)),
)
),
),
)
self.assign(
m,
ops.add(
ops.multiply(m, beta_2), ops.multiply(gradient, (1.0 - beta_2))
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
}
)
return config
Lion.__doc__ = Lion.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
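# Editor's note: a minimal, hedged usage sketch of the optimizer above. The toy
# Sequential model and the random data are assumptions chosen purely for
# illustration; the lr/weight_decay values only follow the 3-10x-vs-AdamW
# guidance from the docstring, not a tuned recipe.
if __name__ == "__main__":
    import numpy as np
    from keras.src import layers
    from keras.src.models import Sequential
    model = Sequential([layers.Dense(1)])
    # Lion typically wants a learning rate 3-10x smaller (and a weight decay
    # 3-10x larger) than an equivalent AdamW configuration.
    model.compile(optimizer=Lion(learning_rate=1e-4, weight_decay=1e-2), loss="mse")
    model.fit(np.random.rand(8, 4), np.random.rand(8, 1), epochs=1, verbose=0)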
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Lion"])
class Lion(optimizer.Optimizer):
"""Optimizer that implements the Lion algorithm.
The Lion optimizer is a stochastic-gradient-descent method that uses the
sign operator to control the magnitude of the update, unlike other adaptive
optimizers such as Adam that rely on second-order moments. This makes
Lion more memory-efficient as it only keeps track of the momentum. According
to the authors (see reference), its performance gain over Adam grows with
the batch size. Because the update of Lion is produced through the sign
operation, resulting in a larger norm, a suitable learning rate for Lion is
typically 3-10x smaller than that for AdamW. The weight decay for Lion
should in turn be 3-10x larger than that for AdamW to maintain a
similar strength (lr * wd).
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
rate to combine the current gradient and the 1st moment estimate.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimate. Defaults to
`0.99`.
{{base_optimizer_keyword_args}}
References:
- [Chen et al., 2023](http://arxiv.org/abs/2302.06675)
- [Authors' implementation](
http://github.com/google/automl/tree/master/lion)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.99,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="lion",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
if beta_1 <= 0 or beta_1 > 1:
raise ValueError(
"Argument `beta_1` must be in the [0, 1] range. Otherwise, the "
f"optimizer degenerates to SignSGD. Received: beta_1={beta_1}."
)
def build(self, var_list):
"""Initialize optimizer variables.
Lion optimizer has one variable `momentums`.
Args:
var_list: list of model variables to build Lion variables on.
"""
if self.built:
return
super().build(var_list)
self._momentums = []
for var in var_list:
self._momentums.append(
self.add_variable_from_reference(
reference_variable=var, name="momentum"
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
beta_1 = ops.cast(self.beta_1, variable.dtype)
beta_2 = ops.cast(self.beta_2, variable.dtype)
m = self._momentums[self._get_variable_index(variable)]
self.assign_sub(
variable,
ops.multiply(
lr,
ops.sign(
ops.add(
ops.multiply(m, beta_1),
ops.multiply(gradient, (1.0 - beta_1)),
)
),
),
)
self.assign(
m,
ops.add(
ops.multiply(m, beta_2), ops.multiply(gradient, (1.0 - beta_2))
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
}
)
return config
Lion.__doc__ = Lion.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FoveaHead
class TestFOVEAHead(TestCase):
def test_fovea_head_loss(self):
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
fovea_head = FoveaHead(num_classes=4, in_channels=1)
        # Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(fovea_head.prior_generator.strides)))
cls_scores, bbox_preds = fovea_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = fovea_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = fovea_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import FoveaHead
class TestFOVEAHead(TestCase):
def test_fovea_head_loss(self):
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
fovea_head = FoveaHead(num_classes=4, in_channels=1)
        # Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(fovea_head.prior_generator.strides)))
cls_scores, bbox_preds = fovea_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = fovea_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = fovea_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from langchain_core.callbacks.base import BaseCallbackHandler
if TYPE_CHECKING:
from langchain_community.callbacks import LLMThoughtLabeler
from streamlit.delta_generator import DeltaGenerator
def StreamlitCallbackHandler(
parent_container: DeltaGenerator,
*,
max_thought_containers: int = 4,
expand_new_thoughts: bool = True,
collapse_completed_thoughts: bool = True,
thought_labeler: Optional[LLMThoughtLabeler] = None,
) -> BaseCallbackHandler:
"""Callback Handler that writes to a Streamlit app.
This CallbackHandler is geared towards
use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When this
threshold is reached, a new thought will cause the oldest thoughts to be
collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether that
expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
Returns
-------
A new StreamlitCallbackHandler instance.
Note that this is an "auto-updating" API: if the installed version of Streamlit
has a more recent StreamlitCallbackHandler implementation, an instance of that class
will be used.
"""
# If we're using a version of Streamlit that implements StreamlitCallbackHandler,
# delegate to it instead of using our built-in handler. The official handler is
# guaranteed to support the same set of kwargs.
try:
from streamlit.external.langchain import StreamlitCallbackHandler
# This is the official handler, so we can just return it.
return StreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
except ImportError:
try:
from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( # noqa: E501
StreamlitCallbackHandler as _InternalStreamlitCallbackHandler,
)
except ImportError:
msg = (
"To use the StreamlitCallbackHandler, please install "
"langchain-community with `pip install langchain-community`."
)
raise ImportError(msg)
return _InternalStreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
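# Editor's note: a hedged usage sketch, not part of the original module. It
# shows how the factory above is typically wired into a Streamlit app; the
# `agent_executor` argument is an assumed, pre-built LangChain AgentExecutor,
# while `st.container()` and `st.write()` are standard Streamlit APIs.
def _example_streamlit_app(agent_executor, prompt: str) -> None:  # pragma: no cover
    import streamlit as st
    st_callback = StreamlitCallbackHandler(st.container())
    # Intermediate "thoughts" render inside the container; the final answer is
    # written below it.
    result = agent_executor.invoke({"input": prompt}, {"callbacks": [st_callback]})
    st.write(result["output"])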
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from langchain_core.callbacks.base import BaseCallbackHandler
if TYPE_CHECKING:
from langchain_community.callbacks import LLMThoughtLabeler
from streamlit.delta_generator import DeltaGenerator
def StreamlitCallbackHandler(
parent_container: DeltaGenerator,
*,
max_thought_containers: int = 4,
expand_new_thoughts: bool = True,
collapse_completed_thoughts: bool = True,
thought_labeler: Optional[LLMThoughtLabeler] = None,
) -> BaseCallbackHandler:
"""Callback Handler that writes to a Streamlit app.
This CallbackHandler is geared towards
use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When this
threshold is reached, a new thought will cause the oldest thoughts to be
collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether that
expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
Returns
-------
A new StreamlitCallbackHandler instance.
Note that this is an "auto-updating" API: if the installed version of Streamlit
has a more recent StreamlitCallbackHandler implementation, an instance of that class
will be used.
"""
# If we're using a version of Streamlit that implements StreamlitCallbackHandler,
# delegate to it instead of using our built-in handler. The official handler is
# guaranteed to support the same set of kwargs.
try:
from streamlit.external.langchain import StreamlitCallbackHandler
# This is the official handler, so we can just return it.
return StreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
except ImportError:
try:
from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( # noqa: E501
StreamlitCallbackHandler as _InternalStreamlitCallbackHandler,
)
except ImportError:
raise ImportError(
"To use the StreamlitCallbackHandler, please install "
"langchain-community with `pip install langchain-community`."
)
return _InternalStreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import device_scope
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
embeddings = model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
        sparsity_infos = model.get_sparsity_stats(embeddings)
        if sparsity_infos["num_rows"] == self.queries_info["lenght_of_queries"]:
            self.queries_info["sparsity_infos"] = sparsity_infos
        else:
            self.corpus_info["sparsity_infos"] = sparsity_infos
return embeddings
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
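# Editor's note: a hedged usage sketch, not part of the original module. It
# only illustrates the dict formats documented in `__init__` above (qid =>
# query, cid => doc, qid => set of relevant cids); the SPLADE checkpoint name
# is an assumption used as a placeholder.
def _example_evaluation():  # pragma: no cover
    from sentence_transformers import SparseEncoder
    queries = {"q1": "what is a sparse embedding?"}
    corpus = {
        "d1": "Sparse embeddings keep only a few non-zero dimensions.",
        "d2": "Completely unrelated text about cooking.",
    }
    relevant_docs = {"q1": {"d1"}}
    evaluator = SparseInformationRetrievalEvaluator(queries, corpus, relevant_docs, name="toy-ir")
    model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
    # Returns a dict of metrics such as ndcg@10, mrr@10, map@100, ...
    return evaluator(model)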
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
embeddings = model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
logger.info(model.get_sparsity_stats(embeddings))
return embeddings
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from llama_index.observability.otel import LlamaIndexOpenTelemetry
from llama_index.observability.otel.base import (
Resource,
SERVICE_NAME,
ConsoleSpanExporter,
)
def test_initialization() -> None:
instrumentor = LlamaIndexOpenTelemetry()
assert instrumentor.service_name_or_resource == Resource(
attributes={SERVICE_NAME: "llamaindex.opentelemetry"}
)
assert isinstance(instrumentor.span_exporter, ConsoleSpanExporter)
assert instrumentor.span_processor == "batch"
assert instrumentor._tracer is None
assert not instrumentor.debug
def test_diff_initialization() -> None:
instrumentor = LlamaIndexOpenTelemetry(
service_name_or_resource="this.is.a.test",
span_processor="simple",
debug=True,
)
assert instrumentor.service_name_or_resource == "this.is.a.test"
assert isinstance(instrumentor.span_exporter, ConsoleSpanExporter)
assert instrumentor.span_processor == "simple"
assert instrumentor._tracer is None
assert instrumentor.debug
|
from llama_index.observability.otel import LlamaIndexOpenTelemetry
from llama_index.observability.otel.base import Resource, SERVICE_NAME, ConsoleSpanExporter
def test_initialization() -> None:
instrumentor = LlamaIndexOpenTelemetry()
assert instrumentor.service_name_or_resource == Resource(attributes={SERVICE_NAME: "llamaindex.opentelemetry"})
assert isinstance(instrumentor.span_exporter, ConsoleSpanExporter)
assert instrumentor.span_processor == "batch"
assert instrumentor._tracer is None
assert not instrumentor.debug
def test_diff_initialization() -> None:
instrumentor = LlamaIndexOpenTelemetry(
service_name_or_resource="this.is.a.test",
span_processor="simple",
        debug=True,
)
assert instrumentor.service_name_or_resource == "this.is.a.test"
assert isinstance(instrumentor.span_exporter, ConsoleSpanExporter)
assert instrumentor.span_processor == "simple"
assert instrumentor._tracer is None
assert instrumentor.debug
|
"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
def _get_client(
api_key: Optional[str] = None,
api_url: Optional[str] = None,
) -> Any:
try:
from langsmith import Client as LangSmithClient
ls_client = LangSmithClient(api_url, api_key=api_key)
if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"):
return ls_client
else:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError:
try:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError as e:
msg = (
"Could not import langsmith or langchainhub (deprecated),"
"please install with `pip install langsmith`."
)
raise ImportError(msg) from e
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = None,
new_repo_is_public: bool = False,
new_repo_description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
) -> str:
"""
    Push an object to the hub and return the URL it can be viewed at in a browser.
    :param repo_full_name: The full name of the prompt to push to in the format of
        `owner/prompt_name` or `prompt_name`.
    :param object: The LangChain object to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the prompt should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the prompt. Defaults to an empty
string.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "push_prompt"):
return client.push_prompt(
repo_full_name,
object=object,
parent_commit_hash=parent_commit_hash,
is_public=new_repo_is_public,
description=new_repo_description,
readme=readme,
tags=tags,
)
# Then it's langchainhub
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
include_model: Optional[bool] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
    Pull an object from the hub and return it as a LangChain object.
:param owner_repo_commit: The full name of the prompt to pull from in the format of
`owner/prompt_name:commit_hash` or `owner/prompt_name`
or just `prompt_name` if it's your own prompt.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "pull_prompt"):
response = client.pull_prompt(owner_repo_commit, include_model=include_model)
return response
# Then it's langchainhub
if hasattr(client, "pull_repo"):
# >= 0.1.15
res_dict = client.pull_repo(owner_repo_commit)
obj = loads(json.dumps(res_dict["manifest"]))
if isinstance(obj, BasePromptTemplate):
if obj.metadata is None:
obj.metadata = {}
obj.metadata["lc_hub_owner"] = res_dict["owner"]
obj.metadata["lc_hub_repo"] = res_dict["repo"]
obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"]
return obj
# Then it's < 0.1.15 langchainhub
resp: str = client.pull(owner_repo_commit)
return loads(resp)
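# Editor's note: a hedged usage sketch, not part of the original module. The
# prompt handle "my-handle/my-prompt" is a placeholder, and a LangSmith API key
# (e.g. via the LANGSMITH_API_KEY environment variable) is assumed to be
# configured before the round trip below will succeed.
def _example_round_trip():  # pragma: no cover
    from langchain_core.prompts import ChatPromptTemplate
    prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
    url = push("my-handle/my-prompt", prompt)
    pulled = pull("my-handle/my-prompt")
    return url, pulled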
|
"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
def _get_client(
api_key: Optional[str] = None,
api_url: Optional[str] = None,
) -> Any:
try:
from langsmith import Client as LangSmithClient
ls_client = LangSmithClient(api_url, api_key=api_key)
if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"):
return ls_client
else:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError:
try:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError as e:
raise ImportError(
"Could not import langsmith or langchainhub (deprecated),"
"please install with `pip install langsmith`."
) from e
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = None,
new_repo_is_public: bool = False,
new_repo_description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
) -> str:
"""
    Push an object to the hub and return the URL it can be viewed at in a browser.
    :param repo_full_name: The full name of the prompt to push to in the format of
        `owner/prompt_name` or `prompt_name`.
    :param object: The LangChain object to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the prompt should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the prompt. Defaults to an empty
string.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "push_prompt"):
return client.push_prompt(
repo_full_name,
object=object,
parent_commit_hash=parent_commit_hash,
is_public=new_repo_is_public,
description=new_repo_description,
readme=readme,
tags=tags,
)
# Then it's langchainhub
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
include_model: Optional[bool] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
    Pull an object from the hub and return it as a LangChain object.
:param owner_repo_commit: The full name of the prompt to pull from in the format of
`owner/prompt_name:commit_hash` or `owner/prompt_name`
or just `prompt_name` if it's your own prompt.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "pull_prompt"):
response = client.pull_prompt(owner_repo_commit, include_model=include_model)
return response
# Then it's langchainhub
if hasattr(client, "pull_repo"):
# >= 0.1.15
res_dict = client.pull_repo(owner_repo_commit)
obj = loads(json.dumps(res_dict["manifest"]))
if isinstance(obj, BasePromptTemplate):
if obj.metadata is None:
obj.metadata = {}
obj.metadata["lc_hub_owner"] = res_dict["owner"]
obj.metadata["lc_hub_repo"] = res_dict["repo"]
obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"]
return obj
# Then it's < 0.1.15 langchainhub
resp: str = client.pull(owner_repo_commit)
return loads(resp)
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class WeightedLayerPooling(Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
config_keys: list[str] = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super().__init__()
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # Keep layers from layer_start onwards (4 by default)
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
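# Editor's note: a hedged usage sketch, not part of the original module. The
# checkpoint name and layer counts are placeholder assumptions; the Transformer
# module must be configured to expose all hidden states so that this module
# receives "all_layer_embeddings" in `forward`.
def _example_pipeline():  # pragma: no cover
    from sentence_transformers import SentenceTransformer, models
    word = models.Transformer("bert-base-uncased", model_args={"output_hidden_states": True})
    weighted = WeightedLayerPooling(
        word_embedding_dimension=word.get_word_embedding_dimension(),
        num_hidden_layers=12,
        layer_start=4,
    )
    pooling = models.Pooling(word.get_word_embedding_dimension())
    return SentenceTransformer(modules=[word, weighted, pooling])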
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super().__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # Keep layers from layer_start onwards (4 by default)
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform as affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size as clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format as convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop as crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes as decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas as encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad as pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou as compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou as compute_iou,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou,
)
|
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
from jina._docarray import docarray_v2
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
        :param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
            these proxy variables before starting. gRPC seems to prefer no proxy.
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
        self.logger.debug('Setting up WebSocket server')
if docarray_v2:
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
if isinstance(self._request_handler, GatewayRequestHandler):
await self._request_handler.streamer._get_endpoints_input_output_models(is_cancel=self.is_cancel)
self._request_handler.streamer._validate_flow_docarray_compatibility()
self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
        self.logger.debug('UviServer server setup')
        await self.server.setup()
        self.logger.debug('WebSocket server setup successful')
@property
def _should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self.server.should_exit
@property
def should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._should_exit
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.logger.debug(f'Shutting down server')
await super().shutdown()
self.server.should_exit = True
await self.server.shutdown()
self.logger.debug(f'Server shutdown finished')
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
from jina._docarray import docarray_v2
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
        :param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
            these proxy variables before starting. gRPC seems to prefer no proxy.
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
if docarray_v2:
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
if isinstance(self._request_handler, GatewayRequestHandler):
await self._request_handler.streamer._get_endpoints_input_output_models()
self._request_handler.streamer._validate_flow_docarray_compatibility()
self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
@property
def _should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self.server.should_exit
@property
def should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._should_exit
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().shutdown()
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
    # torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
raise NotImplementedError(
"`compute_uv=False` is not supported for torch backend."
)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
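# Editor's note: a small, hedged sanity check, not part of the original backend
# file. It only demonstrates the pivot convention noted in `lu_factor` above:
# torch returns 1-based pivots, while this wrapper shifts them to the 0-based
# convention used by the other backends.
if __name__ == "__main__":
    a = convert_to_tensor([[4.0, 3.0], [6.0, 3.0]])
    lu, pivots = lu_factor(a)
    assert int(pivots.min()) >= 0  # 1-based torch pivots have been shifted to 0-based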
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
    # torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
raise NotImplementedError(
"`compute_uv=False` is not supported for torch backend."
)
return torch.linalg.svd(x, full_matrices=full_matrices)
|
from typing import Union
import numpy as np
import pytest
import torch
from docarray import Document, DocumentArray
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
return batch.stack()
def test_len(batch):
assert len(batch) == 10
def test_getitem(batch):
for i in range(len(batch)):
print(i)
assert (batch[i].tensor == torch.zeros(3, 224, 224)).all()
def test_iterator(batch):
for doc in batch:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_stack_setter(batch):
batch.tensor = torch.ones(10, 3, 224, 224)
assert (batch.tensor == torch.ones(10, 3, 224, 224)).all()
def test_stack_optional(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
def test_stack_numpy():
class Image(Document):
tensor: NdArray[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (batch._columns['tensor'] == np.zeros((10, 3, 224, 224))).all()
assert (batch.tensor == np.zeros((10, 3, 224, 224))).all()
assert batch.tensor.ctypes.data == batch._columns['tensor'].ctypes.data
batch.unstack()
def test_stack(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
assert batch._columns['tensor'].data_ptr() == batch.tensor.data_ptr()
for doc, tensor in zip(batch, batch.tensor):
assert doc.tensor.data_ptr() == tensor.data_ptr()
for i in range(len(batch)):
assert batch[i].tensor.data_ptr() == batch.tensor[i].data_ptr()
def test_stack_mod_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (
batch._columns['img']._columns['tensor'] == torch.zeros(10, 3, 224, 224)
).all()
assert (batch.img.tensor == torch.zeros(10, 3, 224, 224)).all()
assert (
batch._columns['img']._columns['tensor'].data_ptr()
== batch.img.tensor.data_ptr()
)
def test_convert_to_da(batch):
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_unstack_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.img.tensor == torch.zeros(3, 224, 224)).all()
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
def test_proto_stacked_mode_numpy():
class MyDoc(Document):
tensor: NdArray[3, 224, 224]
da = DocumentArray[MyDoc](
[MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
da = da.stack()
da.from_protobuf(da.to_protobuf())
def test_stack_call():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
da = da.stack()
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
def test_context_manager():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
with da.stacked_mode() as da:
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
da.tensor = torch.ones(10, 3, 224, 224)
tensor = da.tensor
assert isinstance(tensor, list)
for doc in da:
assert (doc.tensor == torch.ones(3, 224, 224)).all()
def test_stack_union():
class Image(Document):
tensor: Union[TorchTensor[3, 224, 224], NdArray[3, 224, 224]]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch[3].tensor = np.zeros((3, 224, 224))
# union fields aren't actually stacked
# just checking that there is no error
batch.stack()
|
import numpy as np
import pytest
import torch
from docarray import Document, DocumentArray
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
return batch.stack()
def test_len(batch):
assert len(batch) == 10
def test_getitem(batch):
for i in range(len(batch)):
print(i)
assert (batch[i].tensor == torch.zeros(3, 224, 224)).all()
def test_iterator(batch):
for doc in batch:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_stack_setter(batch):
batch.tensor = torch.ones(10, 3, 224, 224)
assert (batch.tensor == torch.ones(10, 3, 224, 224)).all()
def test_stack_optional(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
def test_stack_numpy():
class Image(Document):
tensor: NdArray[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (batch._columns['tensor'] == np.zeros((10, 3, 224, 224))).all()
assert (batch.tensor == np.zeros((10, 3, 224, 224))).all()
assert batch.tensor.ctypes.data == batch._columns['tensor'].ctypes.data
batch.unstack()
def test_stack(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
assert batch._columns['tensor'].data_ptr() == batch.tensor.data_ptr()
for doc, tensor in zip(batch, batch.tensor):
assert doc.tensor.data_ptr() == tensor.data_ptr()
for i in range(len(batch)):
assert batch[i].tensor.data_ptr() == batch.tensor[i].data_ptr()
def test_stack_mod_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (
batch._columns['img']._columns['tensor'] == torch.zeros(10, 3, 224, 224)
).all()
assert (batch.img.tensor == torch.zeros(10, 3, 224, 224)).all()
assert (
batch._columns['img']._columns['tensor'].data_ptr()
== batch.img.tensor.data_ptr()
)
def test_convert_to_da(batch):
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_unstack_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.img.tensor == torch.zeros(3, 224, 224)).all()
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
def test_proto_stacked_mode_numpy():
class MyDoc(Document):
tensor: NdArray[3, 224, 224]
da = DocumentArray[MyDoc](
[MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
da = da.stack()
da.from_protobuf(da.to_protobuf())
def test_stack_call():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
da = da.stack()
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
def test_context_manager():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
with da.stacked_mode() as da:
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
da.tensor = torch.ones(10, 3, 224, 224)
tensor = da.tensor
assert isinstance(tensor, list)
for doc in da:
assert (doc.tensor == torch.ones(3, 224, 224)).all()
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
        # The cropped images are padded into squares during training,
        # but they may still be smaller than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better than
# the Adam optimizer used in the original source code, so we keep the default
# SGD settings; with Adam and lr=5e-4, the mAP is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
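# Hedged note (added for illustration, not part of the original config): when
# automatic LR scaling is enabled, MMEngine scales the optimizer LR linearly by
# (actual total batch size) / base_batch_size. For example, training on
# 4 GPUs x 16 samples per GPU = 64 would scale the LR by 64 / 128 = 0.5.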
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
model = dict(
type='CenterNet',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
        # The cropped images are padded into squares during training,
        # but they may still be smaller than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args=file_client_args),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better than
# the Adam optimizer used in the original source code, so we keep the default
# SGD settings; with Adam and lr=5e-4, the mAP is 29.1.
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
"""Argparser module for Pod runtimes"""
import argparse
from dataclasses import dataclass
from typing import Dict
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
@dataclass
class PodTypeParams:
"""Data Class representing possible parameters for each pod type"""
runtime_cls: str
role_type: PodRoleType
POD_PARAMS_MAPPING: Dict[str, PodTypeParams] = {
'worker': PodTypeParams(runtime_cls='WorkerRuntime', role_type=PodRoleType.WORKER),
'head': PodTypeParams(runtime_cls='HeadRuntime', role_type=PodRoleType.HEAD),
'gateway': PodTypeParams(
runtime_cls='GatewayRuntime', role_type=PodRoleType.GATEWAY
),
}
def mixin_pod_parser(parser, pod_type: str = 'worker'):
"""Mixing in arguments required by :class:`Pod` into the given parser.
:param parser: the parser instance to which we add arguments
    :param pod_type: the pod_type configured by the parser. Can be 'worker' for WorkerRuntime, 'head' for HeadRuntime, or 'gateway' for GatewayRuntime
"""
gp = add_arg_group(parser, title='Pod')
gp.add_argument(
'--runtime-cls',
type=str,
default=POD_PARAMS_MAPPING[pod_type].runtime_cls,
help='The runtime class to run inside the Pod',
)
gp.add_argument(
'--timeout-ready',
type=int,
default=600000,
        help='The timeout in milliseconds that a Pod waits for the runtime to be ready; '
        '-1 means wait forever',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
    # hidden CLI arguments, used internally only
gp.add_argument(
'--shard-id',
type=int,
default=0,
        help='defines the shard identifier for the executor. It is used as a suffix for the workspace path of the executor'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--pod-role',
type=PodRoleType.from_string,
choices=list(PodRoleType),
default=POD_PARAMS_MAPPING[pod_type].role_type,
help='The role of this Pod in a Deployment'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--noblock-on-start',
action='store_true',
default=False,
        help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on '
        '`wait_start_success` in the outer function for the postponed check.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--port',
type=str,
default=str(helper.random_port()),
help='The port for input data to bind to, default is a random port between [49152, 65535].'
' In the case of an external Executor (`--external` or `external=True`) this can be a list of ports, separated by commas.'
' Then, every resulting address will be considered as one replica of the Executor.',
)
gp.add_argument(
'--monitoring',
action='store_true',
default=False,
help='If set, spawn an http server with a prometheus endpoint to expose metrics',
)
gp.add_argument(
'--port-monitoring',
type=str,
default=str(helper.random_port()),
dest='port_monitoring',
help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--retries',
type=int,
default=-1,
dest='retries',
help=f'Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)',
)
gp.add_argument(
'--floating',
action='store_true',
default=False,
        help='If set, the current Pod/Deployment cannot be further chained, '
        'and the next `.add()` will chain after the last Pod/Deployment rather than this one.',
)
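# Hedged usage sketch (added for illustration, not part of the original module):
# shows how the mixin is typically applied -- build an argparse parser, mix in
# the Pod arguments for a given pod type, then parse a command line. The sample
# flags below are made up.
if __name__ == '__main__':
    demo_parser = argparse.ArgumentParser(description='demo Pod parser')
    mixin_pod_parser(demo_parser, pod_type='worker')
    demo_args = demo_parser.parse_args(['--shards', '2', '--replicas', '3'])
    print(demo_args.runtime_cls, demo_args.shards, demo_args.replicas)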
|
"""Argparser module for Pod runtimes"""
import argparse
from dataclasses import dataclass
from typing import Dict
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
@dataclass
class PodTypeParams:
"""Data Class representing possible parameters for each pod type"""
runtime_cls: str
role_type: PodRoleType
POD_PARAMS_MAPPING: Dict[str, PodTypeParams] = {
'worker': PodTypeParams(runtime_cls='WorkerRuntime', role_type=PodRoleType.WORKER),
'head': PodTypeParams(runtime_cls='HeadRuntime', role_type=PodRoleType.HEAD),
'gateway': PodTypeParams(
runtime_cls='GatewayRuntime', role_type=PodRoleType.GATEWAY
),
}
def mixin_pod_parser(parser, pod_type: str = 'worker'):
"""Mixing in arguments required by :class:`Pod` into the given parser.
:param parser: the parser instance to which we add arguments
    :param pod_type: the pod_type configured by the parser. Can be 'worker' for WorkerRuntime, 'head' for HeadRuntime, or 'gateway' for GatewayRuntime
"""
gp = add_arg_group(parser, title='Pod')
gp.add_argument(
'--runtime-cls',
type=str,
default=POD_PARAMS_MAPPING[pod_type].runtime_cls,
help='The runtime class to run inside the Pod',
)
gp.add_argument(
'--timeout-ready',
type=int,
default=600000,
        help='The timeout in milliseconds that a Pod waits for the runtime to be ready; '
        '-1 means wait forever',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
    # hidden CLI arguments, used internally only
gp.add_argument(
'--shard-id',
type=int,
default=0,
        help='defines the shard identifier for the executor. It is used as a suffix for the workspace path of the executor'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--pod-role',
type=PodRoleType.from_string,
choices=list(PodRoleType),
default=POD_PARAMS_MAPPING[pod_type].role_type,
help='The role of this Pod in a Deployment'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--noblock-on-start',
action='store_true',
default=False,
        help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on '
        '`wait_start_success` in the outer function for the postponed check.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--port',
type=int,
default=helper.random_port(),
help='The port for input data to bind to, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--monitoring',
action='store_true',
default=False,
help='If set, spawn an http server with a prometheus endpoint to expose metrics',
)
gp.add_argument(
'--port-monitoring',
type=str,
default=str(helper.random_port()),
dest='port_monitoring',
help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--retries',
type=int,
default=-1,
dest='retries',
help=f'Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)',
)
gp.add_argument(
'--floating',
action='store_true',
default=False,
        help='If set, the current Pod/Deployment cannot be further chained, '
        'and the next `.add()` will chain after the last Pod/Deployment rather than this one.',
)
|
import numpy as np
def psd_numpy(specgram, mask=None, normalize=True, eps=1e-10):
specgram_transposed = np.swapaxes(specgram, 0, 1)
psd = np.einsum("...ct,...et->...tce", specgram_transposed, specgram_transposed.conj())
if mask is not None:
if normalize:
            mask_normalized = mask / (mask.sum(axis=-1, keepdims=True) + eps)
        else:
            mask_normalized = mask
        psd = psd * mask_normalized[..., None, None]
psd = psd.sum(axis=-3)
return psd
def mvdr_weights_souden_numpy(psd_s, psd_n, reference_channel, diag_eps=1e-7, eps=1e-8):
channel = psd_s.shape[-1]
eye = np.eye(channel)
trace = np.matrix.trace(psd_n, axis1=1, axis2=2)
epsilon = trace.real[..., None, None] * diag_eps + eps
diag = epsilon * eye[..., :, :]
psd_n = psd_n + diag
numerator = np.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s
numerator_trace = np.matrix.trace(numerator, axis1=1, axis2=2)
ws = numerator / (numerator_trace[..., None, None] + eps)
if isinstance(reference_channel, int):
beamform_weights = ws[..., :, reference_channel]
else:
beamform_weights = np.einsum("...c,...c->...", ws, reference_channel[..., None, None, :])
return beamform_weights
def mvdr_weights_rtf_numpy(rtf, psd_n, reference_channel, diag_eps=1e-7, eps=1e-8):
channel = rtf.shape[-1]
eye = np.eye(channel)
trace = np.matrix.trace(psd_n, axis1=1, axis2=2)
epsilon = trace.real[..., None, None] * diag_eps + eps
diag = epsilon * eye[..., :, :]
psd_n = psd_n + diag
numerator = np.linalg.solve(psd_n, np.expand_dims(rtf, -1)).squeeze(-1)
denominator = np.einsum("...d,...d->...", rtf.conj(), numerator)
beamform_weights = numerator / (np.expand_dims(denominator.real, -1) + eps)
if isinstance(reference_channel, int):
scale = rtf[..., reference_channel].conj()
else:
scale = np.einsum("...c,...c->...", rtf.conj(), reference_channel[..., None, :])
beamform_weights = beamform_weights * scale[..., None]
return beamform_weights
def rtf_evd_numpy(psd):
_, v = np.linalg.eigh(psd)
rtf = v[..., -1]
return rtf
def rtf_power_numpy(psd_s, psd_n, reference_channel, n_iter, diagonal_loading=True, diag_eps=1e-7, eps=1e-8):
if diagonal_loading:
channel = psd_s.shape[-1]
eye = np.eye(channel)
trace = np.matrix.trace(psd_n, axis1=1, axis2=2)
epsilon = trace.real[..., None, None] * diag_eps + eps
diag = epsilon * eye[..., :, :]
psd_n = psd_n + diag
phi = np.linalg.solve(psd_n, psd_s)
if isinstance(reference_channel, int):
rtf = phi[..., reference_channel]
else:
rtf = phi @ reference_channel
rtf = np.expand_dims(rtf, -1)
if n_iter >= 2:
for _ in range(n_iter - 2):
rtf = phi @ rtf
rtf = psd_s @ rtf
else:
rtf = psd_n @ rtf
rtf = rtf.squeeze(-1)
return rtf
def apply_beamforming_numpy(beamform_weights, specgram):
specgram_enhanced = np.einsum("...fc,...cft->...ft", beamform_weights.conj(), specgram)
return specgram_enhanced
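# Hedged end-to-end sketch (added for illustration, not part of the original
# reference implementation): complex multichannel spectrogram -> masked PSD
# matrices for speech and noise -> Souden MVDR weights -> enhanced single-channel
# spectrogram. All shapes and the random data below are made up.
if __name__ == "__main__":
    channel, freq, time = 4, 257, 100
    rng = np.random.default_rng(0)
    specgram = rng.standard_normal((channel, freq, time)) + 1j * rng.standard_normal((channel, freq, time))
    mask_speech = rng.uniform(size=(freq, time))
    mask_noise = 1.0 - mask_speech
    psd_speech = psd_numpy(specgram, mask_speech)  # (freq, channel, channel)
    psd_noise = psd_numpy(specgram, mask_noise)
    weights = mvdr_weights_souden_numpy(psd_speech, psd_noise, reference_channel=0)
    enhanced = apply_beamforming_numpy(weights, specgram)  # (freq, time)
    assert enhanced.shape == (freq, time)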
|
import numpy as np
def psd_numpy(specgram, mask=None, normalize=True, eps=1e-10):
specgram_transposed = np.swapaxes(specgram, 0, 1)
psd = np.einsum("...ct,...et->...tce", specgram_transposed, specgram_transposed.conj())
if mask is not None:
if normalize:
            mask_normalized = mask / (mask.sum(axis=-1, keepdims=True) + eps)
        else:
            mask_normalized = mask
        psd = psd * mask_normalized[..., None, None]
psd = psd.sum(axis=-3)
return psd
def mvdr_weights_souden_numpy(psd_s, psd_n, reference_channel, diag_eps=1e-7, eps=1e-8):
channel = psd_s.shape[-1]
eye = np.eye(channel)
trace = np.matrix.trace(psd_n, axis1=1, axis2=2)
epsilon = trace.real[..., None, None] * diag_eps + eps
diag = epsilon * eye[..., :, :]
psd_n = psd_n + diag
numerator = np.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s
numerator_trace = np.matrix.trace(numerator, axis1=1, axis2=2)
ws = numerator / (numerator_trace[..., None, None] + eps)
if isinstance(reference_channel, int):
beamform_weights = ws[..., :, reference_channel]
else:
beamform_weights = np.einsum("...c,...c->...", ws, reference_channel[..., None, None, :])
return beamform_weights
def mvdr_weights_rtf_numpy(rtf, psd_n, reference_channel, diag_eps=1e-7, eps=1e-8):
channel = rtf.shape[-1]
eye = np.eye(channel)
trace = np.matrix.trace(psd_n, axis1=1, axis2=2)
epsilon = trace.real[..., None, None] * diag_eps + eps
diag = epsilon * eye[..., :, :]
psd_n = psd_n + diag
numerator = np.linalg.solve(psd_n, np.expand_dims(rtf, -1)).squeeze(-1)
denominator = np.einsum("...d,...d->...", rtf.conj(), numerator)
beamform_weights = numerator / (np.expand_dims(denominator.real, -1) + eps)
if isinstance(reference_channel, int):
scale = rtf[..., reference_channel].conj()
else:
scale = np.einsum("...c,...c->...", rtf.conj(), reference_channel[..., None, :])
beamform_weights = beamform_weights * scale[..., None]
return beamform_weights
def rtf_evd_numpy(psd):
_, v = np.linalg.eigh(psd)
rtf = v[..., -1]
return rtf
def rtf_power_numpy(psd_s, psd_n, reference_channel, n_iter):
phi = np.linalg.solve(psd_n, psd_s)
if isinstance(reference_channel, int):
rtf = phi[..., reference_channel]
else:
rtf = phi @ reference_channel
rtf = np.expand_dims(rtf, -1)
if n_iter >= 2:
for _ in range(n_iter - 2):
rtf = phi @ rtf
rtf = psd_s @ rtf
else:
rtf = psd_n @ rtf
rtf = rtf.squeeze(-1)
return rtf
def apply_beamforming_numpy(beamform_weights, specgram):
specgram_enhanced = np.einsum("...fc,...cft->...ft", beamform_weights.conj(), specgram)
return specgram_enhanced
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_layoutlmv3 import *
from .feature_extraction_layoutlmv3 import *
from .image_processing_layoutlmv3 import *
from .image_processing_layoutlmv3_fast import *
from .modeling_layoutlmv3 import *
from .modeling_tf_layoutlmv3 import *
from .processing_layoutlmv3 import *
from .tokenization_layoutlmv3 import *
from .tokenization_layoutlmv3_fast import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_layoutlmv3 import *
from .feature_extraction_layoutlmv3 import *
from .image_processing_layoutlmv3 import *
from .modeling_layoutlmv3 import *
from .modeling_tf_layoutlmv3 import *
from .processing_layoutlmv3 import *
from .tokenization_layoutlmv3 import *
from .tokenization_layoutlmv3_fast import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
from typing import Optional, Tuple
import torch
from ..utils import logging
logger = logging.get_logger(__name__)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def sdpa_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
is_causal: Optional[bool] = None,
**kwargs,
) -> Tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False) or kwargs.get("head_mask", None) is not None:
logger.warning_once(
"`sdpa` attention does not support `output_attentions=True` or `head_mask`."
" Please set your attention to `eager` if you want any of these features."
)
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
if attention_mask is not None and attention_mask.ndim == 4:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
# SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
# Reference: https://github.com/pytorch/pytorch/issues/112577.
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
# Note that it is important to check first for the shape, otherwise compile will fail with `argument 'is_causal' must be bool, not SymBool`
if is_causal is None:
# The last condition is for encoder (decoder) models which specify this by passing their own `is_causal` flag
# This is mainly due to those models having mixed implementations for encoder, decoder, and encoder-decoder attns
is_causal = query.shape[2] > 1 and attention_mask is None and getattr(module, "is_causal", True)
# Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
# We convert it to a bool for the SDPA kernel that only accepts bools.
if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
is_causal = is_causal.item()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=attention_mask,
dropout_p=dropout,
scale=scaling,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
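# Hedged example (added for illustration, not part of the original module): a
# quick shape check that `repeat_kv` matches `torch.repeat_interleave` along the
# head dimension. The shapes below (batch=2, kv_heads=4, seqlen=3, head_dim=8)
# and n_rep=2 are made up.
if __name__ == "__main__":
    _kv_states = torch.randn(2, 4, 3, 8)
    _repeated = repeat_kv(_kv_states, n_rep=2)
    assert _repeated.shape == (2, 8, 3, 8)
    assert torch.equal(_repeated, _kv_states.repeat_interleave(2, dim=1))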
|
from typing import Optional, Tuple
import torch
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def sdpa_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
is_causal: Optional[bool] = None,
**kwargs,
) -> Tuple[torch.Tensor, None]:
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None and causal_mask.ndim == 4:
causal_mask = causal_mask[:, :, :, : key.shape[-2]]
# SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
# Reference: https://github.com/pytorch/pytorch/issues/112577.
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
# Note that it is important to check first for the shape, otherwise compile will fail with `argument 'is_causal' must be bool, not SymBool`
if is_causal is None:
is_causal = query.shape[2] > 1 and causal_mask is None
# Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
# We convert it to a bool for the SDPA kernel that only accepts bools.
if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
is_causal = is_causal.item()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=causal_mask,
dropout_p=dropout,
scale=scaling,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
|
from typing import List
from torch.utils.data import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset
and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader.
"""
def __init__(self, examples: List[InputExample], model: SentenceTransformer):
self.examples = examples
def __getitem__(self, item):
return self.examples[item]
def __len__(self):
return len(self.examples)
|
from torch.utils.data import Dataset
from typing import List
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset
and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader.
"""
def __init__(self, examples: List[InputExample], model: SentenceTransformer):
self.examples = examples
def __getitem__(self, item):
return self.examples[item]
def __len__(self):
return len(self.examples)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
import logging
import os
import tarfile
import zipfile
from typing import Any, List, Optional
import torchaudio
_LG = logging.getLogger(__name__)
def _extract_tar(from_path: str, to_path: Optional[str] = None, overwrite: bool = False) -> List[str]:
if to_path is None:
to_path = os.path.dirname(from_path)
with tarfile.open(from_path, "r") as tar:
files = []
for file_ in tar: # type: Any
file_path = os.path.join(to_path, file_.name)
if file_.isfile():
files.append(file_path)
if os.path.exists(file_path):
_LG.info("%s already extracted.", file_path)
if not overwrite:
continue
tar.extract(file_, to_path)
return files
def _extract_zip(from_path: str, to_path: Optional[str] = None, overwrite: bool = False) -> List[str]:
if to_path is None:
to_path = os.path.dirname(from_path)
with zipfile.ZipFile(from_path, "r") as zfile:
files = zfile.namelist()
for file_ in files:
file_path = os.path.join(to_path, file_)
if os.path.exists(file_path):
_LG.info("%s already extracted.", file_path)
if not overwrite:
continue
zfile.extract(file_, to_path)
return files
def _load_waveform(
root: str,
filename: str,
exp_sample_rate: int,
):
path = os.path.join(root, filename)
waveform, sample_rate = torchaudio.load(path)
if exp_sample_rate != sample_rate:
raise ValueError(f"sample rate should be {exp_sample_rate}, but got {sample_rate}")
return waveform
|
import logging
import os
import tarfile
import zipfile
from typing import Any, List, Optional
import torchaudio
def _extract_tar(from_path: str, to_path: Optional[str] = None, overwrite: bool = False) -> List[str]:
if to_path is None:
to_path = os.path.dirname(from_path)
with tarfile.open(from_path, "r") as tar:
logging.info("Opened tar file {}.", from_path)
files = []
for file_ in tar: # type: Any
file_path = os.path.join(to_path, file_.name)
if file_.isfile():
files.append(file_path)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
tar.extract(file_, to_path)
return files
def _extract_zip(from_path: str, to_path: Optional[str] = None, overwrite: bool = False) -> List[str]:
if to_path is None:
to_path = os.path.dirname(from_path)
with zipfile.ZipFile(from_path, "r") as zfile:
logging.info("Opened zip file {}.", from_path)
files = zfile.namelist()
for file_ in files:
file_path = os.path.join(to_path, file_)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
zfile.extract(file_, to_path)
return files
def _load_waveform(
root: str,
filename: str,
exp_sample_rate: int,
):
path = os.path.join(root, filename)
waveform, sample_rate = torchaudio.load(path)
if exp_sample_rate != sample_rate:
raise ValueError(f"sample rate should be {exp_sample_rate}, but got {sample_rate}")
return waveform
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
AnyTensor = Union[NdArray]
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor]
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
else:
AnyTensor = Union[NdArray] # type: ignore
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import EmptyCacheHook
class TestEmptyCacheHook:
    def test_empty_cache_hook(self):
Hook = EmptyCacheHook(True, True, True)
Runner = Mock()
Hook.after_iter(Runner)
Hook.before_epoch(Runner)
Hook.after_epoch(Runner)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mock import Mock
from mmengine.hooks import EmptyCacheHook
class TestEmptyCacheHook:
    def test_empty_cache_hook(self):
Hook = EmptyCacheHook(True, True, True)
Runner = Mock()
Hook.after_iter(Runner)
Hook.before_epoch(Runner)
Hook.after_epoch(Runner)
|
"""Couchbase document loader."""
from typing import Any, Iterable, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class CouchbaseReader(BaseReader):
"""
Couchbase document loader.
    Loads data from a Couchbase cluster into Documents used by LlamaIndex.
Args:
client(Optional[Any]): A Couchbase client to use.
If not provided, the client will be created based on the connection_string
and database credentials.
connection_string (Optional[str]): The connection string to the Couchbase cluster.
db_username (Optional[str]): The username to connect to the Couchbase cluster.
db_password (Optional[str]): The password to connect to the Couchbase cluster.
"""
def __init__(
self,
client: Optional[Any] = None,
connection_string: Optional[str] = None,
db_username: Optional[str] = None,
db_password: Optional[str] = None,
) -> None:
"""Initialize Couchbase document loader."""
import_err_msg = "`couchbase` package not found, please run `pip install --upgrade couchbase`"
try:
from couchbase.auth import PasswordAuthenticator
from couchbase.cluster import Cluster
from couchbase.options import ClusterOptions
except ImportError:
raise ImportError(import_err_msg)
if not client:
if not connection_string or not db_username or not db_password:
raise ValueError(
"You need to pass either a couchbase client or connection_string and credentials must be provided."
)
else:
auth = PasswordAuthenticator(
db_username,
db_password,
)
self._client: Cluster = Cluster(connection_string, ClusterOptions(auth))
else:
self._client = client
def lazy_load_data(
self,
query: str,
text_fields: Optional[List[str]] = None,
metadata_fields: Optional[List[str]] = [],
) -> Iterable[Document]:
"""
Load data from the Couchbase cluster lazily.
Args:
query (str): The SQL++ query to execute.
text_fields (Optional[List[str]]): The columns to write into the
`text` field of the document. By default, all columns are
written.
metadata_fields (Optional[List[str]]): The columns to write into the
`metadata` field of the document. By default, no columns are written.
"""
from datetime import timedelta
if not query:
raise ValueError("Query must be provided.")
# Ensure connection to Couchbase cluster
self._client.wait_until_ready(timedelta(seconds=5))
# Run SQL++ Query
result = self._client.query(query)
for row in result:
if not text_fields:
text_fields = list(row.keys())
            # `metadata_fields` may be None when called via `load_data`
            metadata = {field: row[field] for field in metadata_fields or []}
document = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in text_fields
)
yield (Document(text=document, metadata=metadata))
def load_data(
self,
query: str,
text_fields: Optional[List[str]] = None,
metadata_fields: Optional[List[str]] = None,
) -> List[Document]:
"""
Load data from the Couchbase cluster.
Args:
query (str): The SQL++ query to execute.
text_fields (Optional[List[str]]): The columns to write into the
`text` field of the document. By default, all columns are
written.
metadata_fields (Optional[List[str]]): The columns to write into the
`metadata` field of the document. By default, no columns are written.
"""
return list(self.lazy_load_data(query, text_fields, metadata_fields))
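# Hedged usage sketch (added for illustration only; the connection details,
# bucket, and query below are made up and require a reachable Couchbase cluster
# with the `couchbase` package installed):
if __name__ == "__main__":
    reader = CouchbaseReader(
        connection_string="couchbase://localhost",
        db_username="Administrator",
        db_password="password",
    )
    docs = reader.load_data(
        query="SELECT name, callsign FROM `travel-sample`.inventory.airline LIMIT 5",
        text_fields=["name"],
        metadata_fields=["callsign"],
    )
    print(len(docs))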
|
"""Couchbase document loader."""
from typing import Any, Iterable, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class CouchbaseReader(BaseReader):
"""Couchbase document loader.
    Loads data from a Couchbase cluster into Documents used by LlamaIndex.
Args:
client(Optional[Any]): A Couchbase client to use.
If not provided, the client will be created based on the connection_string
and database credentials.
connection_string (Optional[str]): The connection string to the Couchbase cluster.
db_username (Optional[str]): The username to connect to the Couchbase cluster.
db_password (Optional[str]): The password to connect to the Couchbase cluster.
"""
def __init__(
self,
client: Optional[Any] = None,
connection_string: Optional[str] = None,
db_username: Optional[str] = None,
db_password: Optional[str] = None,
) -> None:
"""Initialize Couchbase document loader."""
import_err_msg = "`couchbase` package not found, please run `pip install --upgrade couchbase`"
try:
from couchbase.auth import PasswordAuthenticator
from couchbase.cluster import Cluster
from couchbase.options import ClusterOptions
except ImportError:
raise ImportError(import_err_msg)
if not client:
if not connection_string or not db_username or not db_password:
raise ValueError(
"You need to pass either a couchbase client or connection_string and credentials must be provided."
)
else:
auth = PasswordAuthenticator(
db_username,
db_password,
)
self._client: Cluster = Cluster(connection_string, ClusterOptions(auth))
else:
self._client = client
def lazy_load_data(
self,
query: str,
text_fields: Optional[List[str]] = None,
metadata_fields: Optional[List[str]] = [],
) -> Iterable[Document]:
"""Load data from the Couchbase cluster lazily.
Args:
query (str): The SQL++ query to execute.
text_fields (Optional[List[str]]): The columns to write into the
`text` field of the document. By default, all columns are
written.
metadata_fields (Optional[List[str]]): The columns to write into the
`metadata` field of the document. By default, no columns are written.
"""
from datetime import timedelta
if not query:
raise ValueError("Query must be provided.")
# Ensure connection to Couchbase cluster
self._client.wait_until_ready(timedelta(seconds=5))
# Run SQL++ Query
result = self._client.query(query)
for row in result:
if not text_fields:
text_fields = list(row.keys())
            # `metadata_fields` may be None when called via `load_data`
            metadata = {field: row[field] for field in metadata_fields or []}
document = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in text_fields
)
yield (Document(text=document, metadata=metadata))
def load_data(
self,
query: str,
text_fields: Optional[List[str]] = None,
metadata_fields: Optional[List[str]] = None,
) -> List[Document]:
"""Load data from the Couchbase cluster.
Args:
query (str): The SQL++ query to execute.
text_fields (Optional[List[str]]): The columns to write into the
`text` field of the document. By default, all columns are
written.
metadata_fields (Optional[List[str]]): The columns to write into the
`metadata` field of the document. By default, no columns are written.
"""
return list(self.lazy_load_data(query, text_fields, metadata_fields))
|
from ._alignment import forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
frechet_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
"frechet_distance",
]
|
from ._alignment import forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
doc_vec = DocVec[MyDoc](docs)
storage = doc_vec._storage
result = str(doc_vec[0])
assert 'MyDoc' in result
assert 'id' in result
assert 'tensor' in result
assert 'name' in result
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
doc_vec = DocVec[MyDoc](docs)
storage = doc_vec._storage
result = str(doc_vec[0])
assert 'MyDoc' in result
assert 'id' in result
assert 'tensor' in result
assert 'name' in result
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
.. betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Instead, please use ``transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8.
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
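# Minimal usage sketch (illustrative, not part of the original module):
#   img = PIL.Image.new("RGB", (4, 4))
#   out = ToTensor()(img)  # emits the deprecation warning; returns a float tensor of shape (3, 4, 4) in [0, 1]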
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
|
import os
import pytest
from jina.orchestrate.deployments import Deployment
@pytest.fixture()
def cuda_total_devices(request):
old_cuda_total_devices = os.environ.get('CUDA_TOTAL_DEVICES', None)
os.environ['CUDA_TOTAL_DEVICES'] = str(request.param)
yield
if old_cuda_total_devices is not None:
os.environ['CUDA_TOTAL_DEVICES'] = old_cuda_total_devices
else:
os.unsetenv('CUDA_TOTAL_DEVICES')
@pytest.mark.parametrize(
'device_str, replicas, expected, cuda_total_devices',
[
['1', 1, None, 3], # won't trigger device round-robin
['1', 2, None, 3], # won't trigger device round-robin
['1,2', 2, None, 3], # won't trigger device round-robin
['RR', 2, {0: 0, 1: 1}, 3],
['RR', 5, {0: 0, 1: 1, 2: 2, 3: 0, 4: 1}, 3],
['RR1:', 5, {0: 1, 1: 2, 2: 1, 3: 2, 4: 1}, 3],
['RR0:2', 5, {0: 0, 1: 1, 2: 0, 3: 1, 4: 0}, 3],
['RR1:2', 2, {0: 1, 1: 1}, 3],
['RR2', 2, {0: 2, 1: 2}, 3],
['RRUUID1', 2, {0: 'UUID1', 1: 'UUID1'}, 3],
['RR1:2', 1, {0: 1}, 3],
['RR0,2,3', 3, {0: 0, 1: 2, 2: 3}, 4],
['RR0,2,3', 5, {0: 0, 1: 2, 2: 3, 3: 0, 4: 2}, 4],
[
'RRUUID1,UUID2,UUID3',
5,
{0: 'UUID1', 1: 'UUID2', 2: 'UUID3', 3: 'UUID1', 4: 'UUID2'},
4,
],
[
'RRGPU-0aaaaaaa-74d2-7297-d557-12771b6a79d5,GPU-0bbbbbbb-74d2-7297-d557-12771b6a79d5,GPU-0ccccccc-74d2-7297-d557-12771b6a79d5,GPU-0ddddddd-74d2-7297-d557-12771b6a79d5',
5,
{
0: 'GPU-0aaaaaaa-74d2-7297-d557-12771b6a79d5',
1: 'GPU-0bbbbbbb-74d2-7297-d557-12771b6a79d5',
2: 'GPU-0ccccccc-74d2-7297-d557-12771b6a79d5',
3: 'GPU-0ddddddd-74d2-7297-d557-12771b6a79d5',
4: 'GPU-0aaaaaaa-74d2-7297-d557-12771b6a79d5',
},
4,
],
],
indirect=['cuda_total_devices'],
)
def test_cuda_assignment(device_str, replicas, expected, cuda_total_devices):
actual = Deployment._roundrobin_cuda_device(device_str, replicas)
assert actual == expected
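# Summary of the cases above: plain device strings such as '1' or '1,2' do not trigger the
# round-robin assignment (expected is None); 'RR' cycles the replicas over all devices reported
# via CUDA_TOTAL_DEVICES; 'RR<start>:<stop>' restricts the cycle to that slice of device indices;
# and 'RR<id0>,<id1>,...' cycles over an explicit list of device indices or UUIDs.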
|
import os
import pytest
from jina.orchestrate.deployments import Deployment
@pytest.fixture()
def cuda_total_devices(request):
old_cuda_total_devices = os.environ.get('CUDA_TOTAL_DEVICES', None)
os.environ['CUDA_TOTAL_DEVICES'] = str(request.param)
yield
if old_cuda_total_devices is not None:
os.environ['CUDA_TOTAL_DEVICES'] = old_cuda_total_devices
else:
os.unsetenv('CUDA_TOTAL_DEVICES')
@pytest.mark.parametrize(
'device_str, replicas, expected, cuda_total_devices',
[
['1', 1, None, 3], # won't trigger device round-robin
['1', 2, None, 3], # won't trigger device round-robin
['1,2', 2, None, 3], # won't trigger device round-robin
['RR', 2, {0: 0, 1: 1}, 3],
['RR', 5, {0: 0, 1: 1, 2: 2, 3: 0, 4: 1}, 3],
['RR1:', 5, {0: 1, 1: 2, 2: 1, 3: 2, 4: 1}, 3],
['RR0:2', 5, {0: 0, 1: 1, 2: 0, 3: 1, 4: 0}, 3],
['RR1:2', 2, {0: 1, 1: 1}, 3],
['RR2', 2, {0: 2, 1: 2}, 3],
['RRUUID1', 2, {0: 'UUID1', 1: 'UUID1'}, 3],
['RR1:2', 1, {0: 1}, 3],
['RR0,2,3', 3, {0: 0, 1: 2, 2: 3}, 4],
['RR0,2,3', 5, {0: 0, 1: 2, 2: 3, 3: 0, 4: 2}, 4],
['RRUUID1,UUID2,UUID3', 5, {0: 'UUID1', 1: 'UUID2', 2: 'UUID3', 3: 'UUID1', 4: 'UUID2'}, 4],
['RRGPU-0aaaaaaa-74d2-7297-d557-12771b6a79d5,GPU-0bbbbbbb-74d2-7297-d557-12771b6a79d5,GPU-0ccccccc-74d2-7297-d557-12771b6a79d5,GPU-0ddddddd-74d2-7297-d557-12771b6a79d5', 5, {0: 'GPU-0aaaaaaa-74d2-7297-d557-12771b6a79d5', 1: 'GPU-0bbbbbbb-74d2-7297-d557-12771b6a79d5', 2: 'GPU-0ccccccc-74d2-7297-d557-12771b6a79d5', 3: 'GPU-0ddddddd-74d2-7297-d557-12771b6a79d5', 4: 'GPU-0aaaaaaa-74d2-7297-d557-12771b6a79d5'}, 4],
], indirect=['cuda_total_devices']
)
def test_cuda_assignment(device_str, replicas, expected, cuda_total_devices):
actual = Deployment._roundrobin_cuda_device(device_str, replicas)
assert actual == expected
|
import json
from typing import Any, Type, TypeVar, overload
from fastapi.encoders import jsonable_encoder
from .type import type_match
def to_dict(data) -> dict:
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(jsonable_encoder(data))
T = TypeVar("T")
@overload
def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str, *args, **kwargs) -> Any: ...
def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
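# Illustrative usage (the dataclass below is hypothetical; `type_match` from the local `.type`
# module is assumed to coerce the parsed JSON into the requested type):
#   @dataclasses.dataclass
#   class Point:
#       x: int
#       y: int
#   loads('{"x": 1, "y": 2}', target_type=Point)  # -> Point(x=1, y=2)
#   loads('{"x": 1, "y": 2}')                     # -> {'x': 1, 'y': 2}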
|
import json
from fastapi.encoders import jsonable_encoder
def to_dict(data) -> dict:
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(jsonable_encoder(data))
loads = json.loads
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import models, datasets, losses
import logging
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
import sys
import tqdm
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Train Parameters
model_name = "bert-base-uncased"
batch_size = 8
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_tsdae{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
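# Brief note on the objective (a summary of TSDAE, not taken from this script): the dataset
# wrapper deletes a fraction of tokens from each sentence on the fly, the encoder pools a single
# sentence embedding from the corrupted input, and the tied decoder has to reconstruct the
# original sentence from that embedding alone.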
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import models, datasets, losses
import logging
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
import sys
import tqdm
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Train Parameters
model_name = "bert-base-uncased"
batch_size = 8
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_tsdae{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
import gc
import asyncio
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
)
from typing import Any
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.llms.mock import MockLLM
import pytest
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.chat_engine.simple import SimpleChatEngine
def test_simple_chat_engine() -> None:
engine = SimpleChatEngine.from_defaults()
engine.reset()
response = engine.chat("Test message 1")
assert str(response) == "user: Test message 1\nassistant: "
response = engine.chat("Test message 2")
assert (
str(response)
== "user: Test message 1\nassistant: user: Test message 1\nassistant: \n"
"user: Test message 2\nassistant: "
)
engine.reset()
response = engine.chat("Test message 3")
assert str(response) == "user: Test message 3\nassistant: "
def test_simple_chat_engine_with_init_history() -> None:
engine = SimpleChatEngine.from_defaults(
chat_history=[
ChatMessage(role=MessageRole.USER, content="test human message"),
ChatMessage(role=MessageRole.ASSISTANT, content="test ai message"),
],
)
response = engine.chat("new human message")
assert (
str(response) == "user: test human message\nassistant: test ai message\n"
"user: new human message\nassistant: "
)
@pytest.mark.asyncio
async def test_simple_chat_engine_astream():
engine = SimpleChatEngine.from_defaults()
response = await engine.astream_chat("Hello World!")
num_iters = 0
async for response_part in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert "Hello World!" in response.unformatted_response
assert len(engine.chat_history) == 2
response = await engine.astream_chat("What is the capital of the moon?")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert "Hello World!" in response.unformatted_response
assert "What is the capital of the moon?" in response.unformatted_response
def test_simple_chat_engine_astream_exception_handling():
"""
Test that an exception thrown while retrieving the streamed LLM response gets bubbled up to the user.
Also tests that the non-retrieved exception does not remain in a task that was not awaited, leading to
a 'Task exception was never retrieved' message during garbage collection.
"""
class ExceptionThrownInTest(Exception):
pass
class ExceptionMockLLM(MockLLM):
"""Raises an exception while streaming back the mocked LLM response."""
@classmethod
def class_name(cls) -> str:
return "ExceptionMockLLM"
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_prompt() -> CompletionResponseGen:
for ch in prompt:
yield CompletionResponse(
text=prompt,
delta=ch,
)
raise ExceptionThrownInTest("Exception thrown for testing purposes")
return gen_prompt()
async def async_part():
engine = SimpleChatEngine.from_defaults(
llm=ExceptionMockLLM(), memory=ChatMemoryBuffer.from_defaults()
)
response = await engine.astream_chat("Hello World!")
with pytest.raises(ExceptionThrownInTest):
async for response_part in response.async_response_gen():
pass
not_retrieved_exception = False
def custom_exception_handler(loop, context):
if context.get("message") == "Task exception was never retrieved":
nonlocal not_retrieved_exception
not_retrieved_exception = True
loop = asyncio.new_event_loop()
loop.set_exception_handler(custom_exception_handler)
result = loop.run_until_complete(async_part())
loop.close()
gc.collect()
if not_retrieved_exception:
pytest.fail(
"Exception was not correctly handled - ended up in asyncio cleanup performed during garbage collection"
)
|
import gc
import asyncio
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
)
from typing import Any
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.llms.mock import MockLLM
import pytest
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.chat_engine.simple import SimpleChatEngine
def test_simple_chat_engine() -> None:
engine = SimpleChatEngine.from_defaults()
engine.reset()
response = engine.chat("Test message 1")
assert str(response) == "user: Test message 1\nassistant: "
response = engine.chat("Test message 2")
assert (
str(response)
== "user: Test message 1\nassistant: user: Test message 1\nassistant: \n"
"user: Test message 2\nassistant: "
)
engine.reset()
response = engine.chat("Test message 3")
assert str(response) == "user: Test message 3\nassistant: "
def test_simple_chat_engine_with_init_history() -> None:
engine = SimpleChatEngine.from_defaults(
chat_history=[
ChatMessage(role=MessageRole.USER, content="test human message"),
ChatMessage(role=MessageRole.ASSISTANT, content="test ai message"),
],
)
response = engine.chat("new human message")
assert (
str(response) == "user: test human message\nassistant: test ai message\n"
"user: new human message\nassistant: "
)
@pytest.mark.asyncio()
async def test_simple_chat_engine_astream():
engine = SimpleChatEngine.from_defaults()
response = await engine.astream_chat("Hello World!")
num_iters = 0
async for response_part in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert "Hello World!" in response.unformatted_response
assert len(engine.chat_history) == 2
response = await engine.astream_chat("What is the capital of the moon?")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert "Hello World!" in response.unformatted_response
assert "What is the capital of the moon?" in response.unformatted_response
def test_simple_chat_engine_astream_exception_handling():
"""Test that an exception thrown while retrieving the streamed LLM response gets bubbled up to the user.
Also tests that the non-retrieved exception does not remain in a task that was not awaited, leading to
a 'Task exception was never retrieved' message during garbage collection.
"""
class ExceptionThrownInTest(Exception):
pass
class ExceptionMockLLM(MockLLM):
"""Raises an exception while streaming back the mocked LLM response."""
@classmethod
def class_name(cls) -> str:
return "ExceptionMockLLM"
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_prompt() -> CompletionResponseGen:
for ch in prompt:
yield CompletionResponse(
text=prompt,
delta=ch,
)
raise ExceptionThrownInTest("Exception thrown for testing purposes")
return gen_prompt()
async def async_part():
engine = SimpleChatEngine.from_defaults(
llm=ExceptionMockLLM(), memory=ChatMemoryBuffer.from_defaults()
)
response = await engine.astream_chat("Hello World!")
with pytest.raises(ExceptionThrownInTest):
async for response_part in response.async_response_gen():
pass
not_retrieved_exception = False
def custom_exception_handler(loop, context):
if context.get("message") == "Task exception was never retrieved":
nonlocal not_retrieved_exception
not_retrieved_exception = True
loop = asyncio.new_event_loop()
loop.set_exception_handler(custom_exception_handler)
result = loop.run_until_complete(async_part())
loop.close()
gc.collect()
if not_retrieved_exception:
pytest.fail(
"Exception was not correctly handled - ended up in asyncio cleanup performed during garbage collection"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
# yapf: disable
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, CosineRestartLR,
ExponentialLR, LinearLR, MultiStepLR, OneCycleLR,
PolyLR, ReduceOnPlateauLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
CosineRestartMomentum, ExponentialMomentum,
LinearMomentum, MultiStepMomentum,
PolyMomentum, ReduceOnPlateauMomentum,
StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
CosineRestartParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, OneCycleParamScheduler,
PolyParamScheduler,
ReduceOnPlateauParamScheduler,
StepParamScheduler, _ParamScheduler)
# yapf: enable
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler',
'PolyParamScheduler', 'PolyLR', 'PolyMomentum', 'OneCycleParamScheduler',
'OneCycleLR', 'CosineRestartParamScheduler', 'CosineRestartLR',
'CosineRestartMomentum', 'ReduceOnPlateauParamScheduler',
'ReduceOnPlateauLR', 'ReduceOnPlateauMomentum'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
# yapf: disable
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, CosineRestartLR,
ExponentialLR, LinearLR, MultiStepLR, OneCycleLR,
PolyLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
CosineRestartMomentum, ExponentialMomentum,
LinearMomentum, MultiStepMomentum,
PolyMomentum, StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
CosineRestartParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, OneCycleParamScheduler,
PolyParamScheduler, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler',
'PolyParamScheduler', 'PolyLR', 'PolyMomentum', 'OneCycleParamScheduler',
'OneCycleLR', 'CosineRestartParamScheduler', 'CosineRestartLR',
'CosineRestartMomentum'
]
|
import dataclasses
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.executors import BaseExecutor
from jina.serve.executors.metas import get_default_metas
class ExecutorLegacyParser(BaseLegacyParser):
"""Legacy parser for executor."""
def parse(
self,
cls: Type['BaseExecutor'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseExecutor':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Flow YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
_meta_config = get_default_metas()
_meta_config.update(data.get('metas', {}))
if _meta_config:
data['metas'] = _meta_config
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
if dataclasses.is_dataclass(cls):
obj = cls(
**data.get('with', {}),
)
cls.__bases__[0].__init__(
obj,
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
else:
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
# check if the yaml file used to instantiate 'cls' has arguments that are not in 'cls'
arguments_from_cls = ExecutorLegacyParser._get_all_arguments(cls)
arguments_from_yaml = set(data.get('with', {}))
difference_set = arguments_from_yaml - arguments_from_cls
# only log warnings about unknown args for main Pod
if any(difference_set) and not ExecutorLegacyParser.is_tail_or_head(data):
default_logger.warning(
f'The given arguments {difference_set} are not defined in `{cls.__name__}.__init__`'
)
if not _meta_config:
default_logger.warning(
'"metas" config is not found in this yaml file, '
'this map is important as it provides a unique identifier when '
'persisting the executor on disk.'
)
# for compound executor
if 'components' in data:
obj.components = lambda: data['components']
obj.is_updated = False
return obj
@staticmethod
def is_tail_or_head(data: Dict) -> bool:
"""Based on name, compute if this is a tail/head Pod or a main Pod
:param data: the data for the parser
:return: True if it is tail/head, False otherwise
"""
try:
name = data.get('runtime_args', {}).get('name', '')
return 'head' in name or 'tail' in name
except Exception as _:
pass # name can be None in tests since it's not passed
def dump(self, data: 'BaseExecutor') -> Dict:
"""
:param data: versioned executor object
:return: the dictionary given a versioned flow object
"""
# note: we only save non-default property for the sake of clarity
_defaults = get_default_metas()
p = (
{
k: getattr(data.metas, k)
for k, v in _defaults.items()
if getattr(data.metas, k) != v
}
if hasattr(data, 'metas')
else {}
)
a = {k: v for k, v in data._init_kwargs_dict.items() if k not in _defaults}
r = {}
if a:
r['with'] = a
if p:
r['metas'] = p
if hasattr(data, 'requests'):
r['requests'] = {k: v.__name__ for k, v in data.requests.items()}
if hasattr(data, 'dynamic_batching'):
r['dynamic_batching'] = data.dynamic_batching
if hasattr(data, 'components'):
r['components'] = data.components
return r
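# Shape of the returned dict (illustrative sketch, keys appear only when the corresponding data
# exists on the executor): {'with': {...non-default __init__ kwargs...}, 'metas': {...non-default
# metas...}, 'requests': {'/endpoint': 'method_name', ...}, 'dynamic_batching': {...},
# 'components': [...]}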
|
import dataclasses
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.executors import BaseExecutor
from jina.serve.executors.metas import get_default_metas
class ExecutorLegacyParser(BaseLegacyParser):
"""Legacy parser for executor."""
def parse(
self,
cls: Type['BaseExecutor'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseExecutor':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Flow YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
_meta_config = get_default_metas()
_meta_config.update(data.get('metas', {}))
if _meta_config:
data['metas'] = _meta_config
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
if dataclasses.is_dataclass(cls):
obj = cls(
**data.get('with', {}),
)
cls.__bases__[0].__init__(
obj,
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
)
else:
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
# check if the yaml file used to instantiate 'cls' has arguments that are not in 'cls'
arguments_from_cls = ExecutorLegacyParser._get_all_arguments(cls)
arguments_from_yaml = set(data.get('with', {}))
difference_set = arguments_from_yaml - arguments_from_cls
# only log warnings about unknown args for main Pod
if any(difference_set) and not ExecutorLegacyParser.is_tail_or_head(data):
default_logger.warning(
f'The given arguments {difference_set} are not defined in `{cls.__name__}.__init__`'
)
if not _meta_config:
default_logger.warning(
'"metas" config is not found in this yaml file, '
'this map is important as it provides a unique identifier when '
'persisting the executor on disk.'
)
# for compound executor
if 'components' in data:
obj.components = lambda: data['components']
obj.is_updated = False
return obj
@staticmethod
def is_tail_or_head(data: Dict) -> bool:
"""Based on name, compute if this is a tail/head Pod or a main Pod
:param data: the data for the parser
:return: True if it is tail/head, False otherwise
"""
try:
name = data.get('runtime_args', {}).get('name', '')
return 'head' in name or 'tail' in name
except Exception as _:
pass # name can be None in tests since it's not passed
def dump(self, data: 'BaseExecutor') -> Dict:
"""
:param data: versioned executor object
:return: the dictionary given a versioned flow object
"""
# note: we only save non-default property for the sake of clarity
_defaults = get_default_metas()
p = (
{
k: getattr(data.metas, k)
for k, v in _defaults.items()
if getattr(data.metas, k) != v
}
if hasattr(data, 'metas')
else {}
)
a = {k: v for k, v in data._init_kwargs_dict.items() if k not in _defaults}
r = {}
if a:
r['with'] = a
if p:
r['metas'] = p
if hasattr(data, 'requests'):
r['requests'] = {k: v.__name__ for k, v in data.requests.items()}
if hasattr(data, 'components'):
r['components'] = data.components
return r
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
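# Usage note (behaviour inherited from FolderBasedBuilder; summarised here, not defined in this
# file): only files whose extension appears in AUDIO_EXTENSIONS are collected, class labels are
# inferred from the parent directory names unless `drop_labels` is set, and an optional metadata
# file in the folder can supply extra columns.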
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio()
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
"""Feishu docs reader."""
import json
import os
import time
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copyright (2023) Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FeishuDocsReader(BaseReader):
"""
Feishu Docs reader.
Reads documents from Feishu Docs.
"""
host = "https://open.feishu.cn"
documents_raw_content_url_path = "/open-apis/docx/v1/documents/{}/raw_content"
tenant_access_token_internal_url_path = (
"/open-apis/auth/v3/tenant_access_token/internal"
)
def __init__(self, app_id, app_secret) -> None:
"""
Args:
app_id: The unique identifier of the application, obtained after the application is created.
app_secret: Application key, obtained after creating the application.
"""
super().__init__()
self.app_id = app_id
self.app_secret = app_secret
self.tenant_access_token = ""
self.expire = 0
def load_data(self, document_ids: List[str]) -> List[Document]:
"""
Load data from the input directory.
Args:
document_ids (List[str]): a list of document ids.
"""
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for document_id in document_ids:
doc = self._load_doc(document_id)
results.append(Document(text=doc, extra_info={"document_id": document_id}))
return results
def _load_doc(self, document_id) -> str:
"""
Load a document from Feishu Docs.
Args:
document_id: the document id.
Returns:
The document text.
"""
url = self.host + self.documents_raw_content_url_path.format(document_id)
if self.tenant_access_token == "" or self.expire < time.time():
self._update_tenant_access_token()
headers = {
"Authorization": f"Bearer {self.tenant_access_token}",
"Content-Type": "application/json; charset=utf-8",
}
response = requests.get(url, headers=headers)
return response.json()["data"]["content"]
def _update_tenant_access_token(self):
"""For update tenant_access_token."""
url = self.host + self.tenant_access_token_internal_url_path
headers = {"Content-Type": "application/json; charset=utf-8"}
data = {"app_id": self.app_id, "app_secret": self.app_secret}
response = requests.post(url, data=json.dumps(data), headers=headers)
self.tenant_access_token = response.json()["tenant_access_token"]
self.expire = time.time() + response.json()["expire"]
def set_lark_domain(self):
"""The default API endpoints are for Feishu, in order to switch to Lark, we should use set_lark_domain."""
self.host = "https://open.larksuite.com"
if __name__ == "__main__":
app_id = os.environ.get("FEISHU_APP_ID")
app_secret = os.environ.get("FEISHU_APP_SECRET")
reader = FeishuDocsReader(app_id, app_secret)
print(reader.load_data(document_ids=[os.environ.get("FEISHU_DOC_ID")]))
|
"""Feishu docs reader."""
import json
import os
import time
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copyright (2023) Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FeishuDocsReader(BaseReader):
"""Feishu Docs reader.
Reads documents from Feishu Docs.
"""
host = "https://open.feishu.cn"
documents_raw_content_url_path = "/open-apis/docx/v1/documents/{}/raw_content"
tenant_access_token_internal_url_path = (
"/open-apis/auth/v3/tenant_access_token/internal"
)
def __init__(self, app_id, app_secret) -> None:
"""
Args:
app_id: The unique identifier of the application, obtained after the application is created.
app_secret: Application key, obtained after creating the application.
"""
super().__init__()
self.app_id = app_id
self.app_secret = app_secret
self.tenant_access_token = ""
self.expire = 0
def load_data(self, document_ids: List[str]) -> List[Document]:
"""Load data from the input directory.
Args:
document_ids (List[str]): a list of document ids.
"""
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for document_id in document_ids:
doc = self._load_doc(document_id)
results.append(Document(text=doc, extra_info={"document_id": document_id}))
return results
def _load_doc(self, document_id) -> str:
"""Load a document from Feishu Docs.
Args:
document_id: the document id.
Returns:
The document text.
"""
url = self.host + self.documents_raw_content_url_path.format(document_id)
if self.tenant_access_token == "" or self.expire < time.time():
self._update_tenant_access_token()
headers = {
"Authorization": f"Bearer {self.tenant_access_token}",
"Content-Type": "application/json; charset=utf-8",
}
response = requests.get(url, headers=headers)
return response.json()["data"]["content"]
def _update_tenant_access_token(self):
"""For update tenant_access_token."""
url = self.host + self.tenant_access_token_internal_url_path
headers = {"Content-Type": "application/json; charset=utf-8"}
data = {"app_id": self.app_id, "app_secret": self.app_secret}
response = requests.post(url, data=json.dumps(data), headers=headers)
self.tenant_access_token = response.json()["tenant_access_token"]
self.expire = time.time() + response.json()["expire"]
def set_lark_domain(self):
"""The default API endpoints are for Feishu, in order to switch to Lark, we should use set_lark_domain."""
self.host = "https://open.larksuite.com"
if __name__ == "__main__":
app_id = os.environ.get("FEISHU_APP_ID")
app_secret = os.environ.get("FEISHU_APP_SECRET")
reader = FeishuDocsReader(app_id, app_secret)
print(reader.load_data(document_ids=[os.environ.get("FEISHU_DOC_ID")]))
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union
from torchvision.prototype import datapoints
from torchvision.prototype.datapoints._datapoint import FillType, FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
# Check key for type
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
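# Illustrative behaviour (not part of the original file): _get_defaultdict(0) acts like
# defaultdict(lambda: 0) -- unknown keys map to 0 -- except that the default factory is a
# picklable functools.partial rather than a lambda.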
def _convert_fill_arg(fill: datapoints.FillType) -> datapoints.FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
# This cast does Sequence -> List[float] to please mypy and torch.jit.script
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill
def _setup_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> Dict[Type, FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
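# Rough sketch of the mapping produced above (values shown for illustration only):
#   _setup_fill_arg(0)                      -> defaultdict returning 0 for every type
#   _setup_fill_arg({SomeType: (0.5, 0.5)}) -> {SomeType: [0.5, 0.5]}  # sequences become lists of floats
# where SomeType stands for any datapoint class used as a dictionary key.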
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Sequence, Type, TypeVar, Union
from torchvision.prototype import datapoints
from torchvision.prototype.datapoints._datapoint import FillType, FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
from typing_extensions import Literal
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
# Check key for type
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
def _convert_fill_arg(fill: datapoints.FillType) -> datapoints.FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
# This cast does Sequence -> List[float] to please mypy and torch.jit.script
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill
def _setup_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> Dict[Type, FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
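# Note on avg_factor: combined with reduction='mean' it replaces the plain element mean with a
# sum divided by avg_factor, e.g. an element-wise loss of [1., 1., 2.] with avg_factor=2 reduces
# to (1 + 1 + 2) / 2 = 2.0 (up to the eps added to avoid division by zero).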
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
"""Documents module.
**Document** module is a collection of classes that handle documents
and their transformations.
"""
from langchain_core.documents.base import Document
from langchain_core.documents.compressor import BaseDocumentCompressor
from langchain_core.documents.transformers import BaseDocumentTransformer
__all__ = ["Document", "BaseDocumentTransformer", "BaseDocumentCompressor"]
|
"""**Document** module is a collection of classes that handle documents
and their transformations.
"""
from langchain_core.documents.base import Document
from langchain_core.documents.compressor import BaseDocumentCompressor
from langchain_core.documents.transformers import BaseDocumentTransformer
__all__ = ["Document", "BaseDocumentTransformer", "BaseDocumentCompressor"]
|
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticDocIndex
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index, pytest.mark.elasticv8]
def test_column_config():
class MyDoc(BaseDoc):
text: str
color: str = Field(col_type='keyword')
index = ElasticDocIndex[MyDoc]()
index_docs = [
MyDoc(id='0', text='hello world', color='red'),
MyDoc(id='1', text='never gonna give you up', color='blue'),
MyDoc(id='2', text='we are the world', color='green'),
]
index.index(index_docs)
query = 'world'
docs, _ = index.text_search(query, search_field='text')
assert [doc.id for doc in docs] == ['0', '2']
filter_query = {'terms': {'color': ['red', 'blue']}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == ['0', '1']
def test_field_object():
class MyDoc(BaseDoc):
manager: dict = Field(
properties={
'age': {'type': 'integer'},
'name': {
'properties': {
'first': {'type': 'keyword'},
'last': {'type': 'keyword'},
}
},
}
)
index = ElasticDocIndex[MyDoc]()
doc = [
MyDoc(manager={'age': 25, 'name': {'first': 'Rachel', 'last': 'Green'}}),
MyDoc(manager={'age': 30, 'name': {'first': 'Monica', 'last': 'Geller'}}),
MyDoc(manager={'age': 35, 'name': {'first': 'Phoebe', 'last': 'Buffay'}}),
]
index.index(doc)
id_ = doc[0].id
assert index[id_].id == id_
assert index[id_].manager == doc[0].manager
filter_query = {'range': {'manager.age': {'gte': 30}}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == [doc[1].id, doc[2].id]
def test_field_geo_point():
class MyDoc(BaseDoc):
location: dict = Field(col_type='geo_point')
index = ElasticDocIndex[MyDoc]()
doc = [
MyDoc(location={'lat': 40.12, 'lon': -72.34}),
MyDoc(location={'lat': 41.12, 'lon': -73.34}),
MyDoc(location={'lat': 42.12, 'lon': -74.34}),
]
index.index(doc)
query = {
'query': {
'geo_bounding_box': {
'location': {
'top_left': {'lat': 42, 'lon': -74},
'bottom_right': {'lat': 40, 'lon': -72},
}
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_field_range():
class MyDoc(BaseDoc):
expected_attendees: dict = Field(col_type='integer_range')
time_frame: dict = Field(col_type='date_range', format='yyyy-MM-dd')
index = ElasticDocIndex[MyDoc]()
doc = [
MyDoc(
expected_attendees={'gte': 10, 'lt': 20},
time_frame={'gte': '2023-01-01', 'lt': '2023-02-01'},
),
MyDoc(
expected_attendees={'gte': 20, 'lt': 30},
time_frame={'gte': '2023-02-01', 'lt': '2023-03-01'},
),
MyDoc(
expected_attendees={'gte': 30, 'lt': 40},
time_frame={'gte': '2023-03-01', 'lt': '2023-04-01'},
),
]
index.index(doc)
query = {
'query': {
'bool': {
'should': [
{'term': {'expected_attendees': {'value': 15}}},
{
'range': {
'time_frame': {
'gte': '2023-02-05',
'lt': '2023-02-10',
'relation': 'contains',
}
}
},
]
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
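# Why both ids are returned above: doc[0] matches the `term` clause because 15 falls inside its
# stored expected_attendees range [10, 20), while doc[1] matches the `range` clause because
# 'relation': 'contains' selects stored date_range values that fully contain 2023-02-05..2023-02-10.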
|
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticDocIndex
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index, pytest.mark.elasticv8]
def test_column_config():
class MyDoc(BaseDoc):
text: str
color: str = Field(col_type='keyword')
store = ElasticDocIndex[MyDoc]()
index_docs = [
MyDoc(id='0', text='hello world', color='red'),
MyDoc(id='1', text='never gonna give you up', color='blue'),
MyDoc(id='2', text='we are the world', color='green'),
]
store.index(index_docs)
query = 'world'
docs, _ = store.text_search(query, search_field='text')
assert [doc.id for doc in docs] == ['0', '2']
filter_query = {'terms': {'color': ['red', 'blue']}}
docs = store.filter(filter_query)
assert [doc.id for doc in docs] == ['0', '1']
def test_field_object():
class MyDoc(BaseDoc):
manager: dict = Field(
properties={
'age': {'type': 'integer'},
'name': {
'properties': {
'first': {'type': 'keyword'},
'last': {'type': 'keyword'},
}
},
}
)
store = ElasticDocIndex[MyDoc]()
doc = [
MyDoc(manager={'age': 25, 'name': {'first': 'Rachel', 'last': 'Green'}}),
MyDoc(manager={'age': 30, 'name': {'first': 'Monica', 'last': 'Geller'}}),
MyDoc(manager={'age': 35, 'name': {'first': 'Phoebe', 'last': 'Buffay'}}),
]
store.index(doc)
id_ = doc[0].id
assert store[id_].id == id_
assert store[id_].manager == doc[0].manager
filter_query = {'range': {'manager.age': {'gte': 30}}}
docs = store.filter(filter_query)
assert [doc.id for doc in docs] == [doc[1].id, doc[2].id]
def test_field_geo_point():
class MyDoc(BaseDoc):
location: dict = Field(col_type='geo_point')
store = ElasticDocIndex[MyDoc]()
doc = [
MyDoc(location={'lat': 40.12, 'lon': -72.34}),
MyDoc(location={'lat': 41.12, 'lon': -73.34}),
MyDoc(location={'lat': 42.12, 'lon': -74.34}),
]
store.index(doc)
query = {
'query': {
'geo_bounding_box': {
'location': {
'top_left': {'lat': 42, 'lon': -74},
'bottom_right': {'lat': 40, 'lon': -72},
}
}
},
}
docs, _ = store.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_field_range():
class MyDoc(BaseDoc):
expected_attendees: dict = Field(col_type='integer_range')
time_frame: dict = Field(col_type='date_range', format='yyyy-MM-dd')
store = ElasticDocIndex[MyDoc]()
doc = [
MyDoc(
expected_attendees={'gte': 10, 'lt': 20},
time_frame={'gte': '2023-01-01', 'lt': '2023-02-01'},
),
MyDoc(
expected_attendees={'gte': 20, 'lt': 30},
time_frame={'gte': '2023-02-01', 'lt': '2023-03-01'},
),
MyDoc(
expected_attendees={'gte': 30, 'lt': 40},
time_frame={'gte': '2023-03-01', 'lt': '2023-04-01'},
),
]
store.index(doc)
query = {
'query': {
'bool': {
'should': [
{'term': {'expected_attendees': {'value': 15}}},
{
'range': {
'time_frame': {
'gte': '2023-02-05',
'lt': '2023-02-10',
'relation': 'contains',
}
}
},
]
}
},
}
docs, _ = store.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
|
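A short follow-up sketch to the geo_point test above: the same index handle (`store` in the second copy of the test) could also be filtered by radius with Elasticsearch's standard `geo_distance` query. The 200km radius is an illustrative assumption and is not part of the original tests.
# Hypothetical radius filter over the geo_point field defined in the test above.
radius_query = {
    'query': {
        'geo_distance': {
            'distance': '200km',  # assumed radius, purely illustrative
            'location': {'lat': 40.12, 'lon': -72.34},  # reference point taken from the first doc
        }
    },
}
docs, _ = store.execute_query(radius_query)  # same execute_query API used in the tests above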
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class DecodeImage(Transform):
_transformed_types = (features.EncodedImage,)
def _transform(self, inpt: torch.Tensor, params: Dict[str, Any]) -> features.Image:
return F.decode_image_with_pil(inpt) # type: ignore[no-any-return]
class LabelToOneHot(Transform):
_transformed_types = (features.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def _transform(self, inpt: features.Label, params: Dict[str, Any]) -> features.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return features.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
class PILToTensor(Transform):
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: Union[PIL.Image.Image], params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
_transformed_types = (features.is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> features.Image:
return F.to_image_tensor(inpt) # type: ignore[no-any-return]
class ToImagePIL(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align them with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
|
from typing import Any, cast, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class DecodeImage(Transform):
_transformed_types = (features.EncodedImage,)
def _transform(self, inpt: torch.Tensor, params: Dict[str, Any]) -> features.Image:
return cast(features.Image, F.decode_image_with_pil(inpt))
class LabelToOneHot(Transform):
_transformed_types = (features.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def _transform(self, inpt: features.Label, params: Dict[str, Any]) -> features.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt, num_classes=num_categories)
return features.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
class PILToTensor(Transform):
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: Union[PIL.Image.Image], params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
_transformed_types = (features.is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> features.Image:
return cast(features.Image, F.to_image_tensor(inpt))
class ToImagePIL(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align them with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference for pre-processed data with a trained model.
"""
import datetime as dt
import logging
from fairseq import options
from interactive_asr.utils import (
add_asr_eval_argument,
get_microphone_transcription,
setup_asr,
transcribe_file,
)
def main(args):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
task, generator, models, sp, tgt_dict = setup_asr(args, logger)
print("READY!")
if args.input_file:
transcription_time, transcription = transcribe_file(args, task, generator, models, sp, tgt_dict)
print("transcription:", transcription)
print("transcription_time:", transcription_time)
else:
for transcription in get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
print("{}: {}".format(dt.datetime.now().strftime("%H:%M:%S"), transcription[0][0]))
def cli_main():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference for pre-processed data with a trained model.
"""
import datetime as dt
import logging
from fairseq import options
from interactive_asr.utils import add_asr_eval_argument, setup_asr, get_microphone_transcription, transcribe_file
def main(args):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
task, generator, models, sp, tgt_dict = setup_asr(args, logger)
print("READY!")
if args.input_file:
transcription_time, transcription = transcribe_file(args, task, generator, models, sp, tgt_dict)
print("transcription:", transcription)
print("transcription_time:", transcription_time)
else:
for transcription in get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
print("{}: {}".format(dt.datetime.now().strftime("%H:%M:%S"), transcription[0][0]))
def cli_main():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
import datetime
import json
import typing
import prisma.models
import pydantic
import backend.data.block
import backend.data.graph
import backend.server.model
class LibraryAgent(pydantic.BaseModel):
id: str # Changed from agent_id to match GraphMeta
agent_id: str
agent_version: int # Changed from agent_version to match GraphMeta
preset_id: str | None
updated_at: datetime.datetime
name: str
description: str
# Made input_schema and output_schema match GraphMeta's type
input_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
output_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
is_favorite: bool
is_created_by_user: bool
is_latest_version: bool
@staticmethod
def from_db(agent: prisma.models.LibraryAgent):
if not agent.Agent:
raise ValueError("AgentGraph is required")
graph = backend.data.graph.GraphModel.from_db(agent.Agent)
agent_updated_at = agent.Agent.updatedAt
lib_agent_updated_at = agent.updatedAt
# Take the latest updated_at timestamp either when the graph was updated or the library agent was updated
updated_at = (
max(agent_updated_at, lib_agent_updated_at)
if agent_updated_at
else lib_agent_updated_at
)
return LibraryAgent(
id=agent.id,
agent_id=agent.agentId,
agent_version=agent.agentVersion,
updated_at=updated_at,
name=graph.name,
description=graph.description,
input_schema=graph.input_schema,
output_schema=graph.output_schema,
is_favorite=agent.isFavorite,
is_created_by_user=agent.isCreatedByUser,
is_latest_version=graph.is_active,
preset_id=agent.AgentPreset.id if agent.AgentPreset else None,
)
class LibraryAgentPreset(pydantic.BaseModel):
id: str
updated_at: datetime.datetime
agent_id: str
agent_version: int
name: str
description: str
is_active: bool
inputs: dict[str, typing.Union[backend.data.block.BlockInput, typing.Any]]
@staticmethod
def from_db(preset: prisma.models.AgentPreset):
input_data = {}
for data in preset.InputPresets or []:
input_data[data.name] = json.loads(data.data)
return LibraryAgentPreset(
id=preset.id,
updated_at=preset.updatedAt,
agent_id=preset.agentId,
agent_version=preset.agentVersion,
name=preset.name,
description=preset.description,
is_active=preset.isActive,
inputs=input_data,
)
class LibraryAgentPresetResponse(pydantic.BaseModel):
presets: list[LibraryAgentPreset]
pagination: backend.server.model.Pagination
class CreateLibraryAgentPresetRequest(pydantic.BaseModel):
name: str
description: str
inputs: dict[str, typing.Union[backend.data.block.BlockInput, typing.Any]]
agent_id: str
agent_version: int
is_active: bool
|
import datetime
import json
import typing
import prisma.models
import pydantic
import backend.data.block
import backend.data.graph
import backend.server.model
class LibraryAgent(pydantic.BaseModel):
id: str # Changed from agent_id to match GraphMeta
agent_id: str
agent_version: int # Changed from agent_version to match GraphMeta
preset_id: str | None
updated_at: datetime.datetime
name: str
description: str
# Made input_schema and output_schema match GraphMeta's type
input_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
output_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
is_favorite: bool
is_created_by_user: bool
is_latest_version: bool
@staticmethod
def from_db(agent: prisma.models.LibraryAgent):
if not agent.Agent:
raise ValueError("AgentGraph is required")
graph = backend.data.graph.GraphModel.from_db(agent.Agent)
agent_updated_at = agent.Agent.updatedAt
lib_agent_updated_at = agent.updatedAt
# Take the latest updated_at timestamp either when the graph was updated or the library agent was updated
updated_at = (
max(agent_updated_at, lib_agent_updated_at)
if agent_updated_at
else lib_agent_updated_at
)
return LibraryAgent(
id=agent.id,
agent_id=agent.agentId,
agent_version=agent.agentVersion,
updated_at=updated_at,
name=graph.name,
description=graph.description,
input_schema=graph.input_schema,
output_schema=graph.output_schema,
is_favorite=agent.isFavorite,
is_created_by_user=agent.isCreatedByUser,
is_latest_version=graph.is_active,
preset_id=agent.AgentPreset.id if agent.AgentPreset else None,
)
class LibraryAgentPreset(pydantic.BaseModel):
id: str
updated_at: datetime.datetime
agent_id: str
agent_version: int
name: str
description: str
is_active: bool
inputs: dict[str, typing.Union[backend.data.block.BlockInput, typing.Any]]
@staticmethod
def from_db(preset: prisma.models.AgentPreset):
input_data = {}
for data in preset.InputPresets or []:
input_data[data.name] = json.loads(data.data)
return LibraryAgentPreset(
id=preset.id,
updated_at=preset.updatedAt,
agent_id=preset.agentId,
agent_version=preset.agentVersion,
name=preset.name,
description=preset.description,
is_active=preset.isActive,
inputs=input_data,
)
class LibraryAgentPresetResponse(pydantic.BaseModel):
presets: list[LibraryAgentPreset]
pagination: backend.server.model.Pagination
class CreateLibraryAgentPresetRequest(pydantic.BaseModel):
name: str
description: str
inputs: dict[str, typing.Union[backend.data.block.BlockInput, typing.Any]]
agent_id: str
agent_version: int
is_active: bool
|
import io
import pathlib
from collections import namedtuple
from collections.abc import Iterator
from typing import Any, Optional, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> list[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: tuple[Any, Any]) -> dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
|
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
|
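A hedged usage sketch for the dataset registered above: the prototype datasets package exposed a `load()` helper keyed by the registered NAME. The exact signature and the default download behaviour are assumptions here; the prototype API is unstable and the PCAM resources are fetched from Google Drive.
# Hypothetical: iterate a few PCAM samples through the prototype datasets entry point.
from torchvision.prototype import datasets  # assumed prototype API, subject to change

dp = datasets.load("pcam", split="val")  # returns the IterDataPipe built by _datapipe()
sample = next(iter(dp))
print(sample["image"].shape, sample["label"])  # keys come from _prepare_sample() above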
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD'
]
|
"""Tests related to the `DataIter` interface."""
import numpy as np
import xgboost
from xgboost import testing as tm
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, None)
Xy_0 = xgboost.QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = xgboost.QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
|
"""Tests related to the `DataIter` interface."""
import numpy as np
import xgboost
from xgboost import testing as tm
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, None)
Xy_0 = xgboost.QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = xgboost.QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
|
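The test above builds a `QuantileDMatrix` from `tm.IteratorForTest`; below is a minimal sketch of what such an iterator looks like, assuming the public `xgboost.DataIter` interface (a callback-style `next()` that returns 1 while batches remain and 0 when exhausted). It is a sketch for illustration, not the testing helper itself.
import numpy as np
import xgboost

class MiniBatchIter(xgboost.DataIter):
    """Feed in-memory (X, y) batches to DMatrix construction one at a time."""

    def __init__(self, batches):
        self._batches = batches
        self._it = 0
        super().__init__()

    def next(self, input_data):
        if self._it == len(self._batches):
            return 0  # no more batches
        X, y = self._batches[self._it]
        input_data(data=X, label=y)  # hand one batch to XGBoost via the callback
        self._it += 1
        return 1

    def reset(self):
        self._it = 0  # called before each new pass over the data

batches = [(np.random.rand(64, 16), np.random.rand(64)) for _ in range(3)]
Xy = xgboost.QuantileDMatrix(MiniBatchIter(batches))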
"""
This application demonstrates how to find duplicate questions (paraphrases) in a long
list of sentences.
"""
from sentence_transformers import SentenceTransformer, util
# Questions can be a long list of sentences up to 100k sentences or more.
# For demonstration purposes, we limit it to a few questions which all have one duplicate
questions = [
"How did you catch your spouse cheating?",
"How can I find out if my husband is cheating?",
"Is my wife cheating?",
"How do I know if my partner is cheating?",
"Why is Starbucks in India overrated?",
"Is Starbucks overrated in india?",
"How can I lose weight fast without exercise?",
"Can I lose weight without exercise?",
"Which city is the best in India? Why?",
"Which is the best city in India?",
"How can I stay focused in class?",
"How can I stay focused on my school work?",
"How can I Remotely hack a mobile phone?",
"How can I hack my phone?",
"Where should I stay in Goa?",
"Which are the best hotels in Goa?",
"Why does hair turn white?",
"What causes older peoples hair to turn grey?",
"What is the easiest way to get followers on Quora?",
"How do I get more followers for my Quora?",
]
model = SentenceTransformer("all-MiniLM-L6-v2")
# Given a model and a List of strings (texts), evaluation.ParaphraseMiningEvaluator.paraphrase_mining performs a
# mining task by computing cosine similarity between all possible combinations and returning the ones with the highest scores.
# It returns a list of tuples (score, i, j) with i, j representing the index in the questions list.
pairs = util.paraphrase_mining(model, questions)
# Output Top-20 pairs:
for score, qid1, qid2 in pairs[0:20]:
print("{:.3f}\t{}\t\t\t{}".format(score, questions[qid1], questions[qid2]))
|
"""
This application demonstrates how to find duplicate questions (paraphrases) in a long
list of sentences.
"""
from sentence_transformers import SentenceTransformer, util
# Questions can be a long list of sentences up to 100k sentences or more.
# For demonstration purposes, we limit it to a few questions which all have one duplicate
questions = [
'How did you catch your spouse cheating?',
'How can I find out if my husband is cheating?',
'Is my wife cheating?',
'How do I know if my partner is cheating?',
'Why is Starbucks in India overrated?',
'Is Starbucks overrated in india?',
'How can I lose weight fast without exercise?',
'Can I lose weight without exercise?',
'Which city is the best in India? Why?',
'Which is the best city in India?',
'How can I stay focused in class?',
'How can I stay focused on my school work?',
'How can I Remotely hack a mobile phone?',
'How can I hack my phone?',
'Where should I stay in Goa?',
'Which are the best hotels in Goa?',
'Why does hair turn white?',
'What causes older peoples hair to turn grey?',
'What is the easiest way to get followers on Quora?',
'How do I get more followers for my Quora?'
]
model = SentenceTransformer('all-MiniLM-L6-v2')
# Given a model and a List of strings (texts), evaluation.ParaphraseMiningEvaluator.paraphrase_mining performs a
# mining task by computing cosine similarity between all possible combinations and returning the ones with the highest scores.
# It returns a list of tuples (score, i, j) with i, j representing the index in the questions list.
pairs = util.paraphrase_mining(model, questions)
#Output Top-20 pairs:
for score, qid1, qid2 in pairs[0:20]:
print("{:.3f}\t{}\t\t\t{}".format(score, questions[qid1], questions[qid2]))
|
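A small post-processing sketch (not part of the original script): since `pairs` above is a list of `(score, i, j)` tuples ordered by score, near-duplicates can be collected by thresholding the cosine similarity. The 0.75 cut-off is an arbitrary assumption.
# Keep only pairs above an assumed similarity threshold and print them as duplicates.
duplicates = [(questions[i], questions[j]) for score, i, j in pairs if score > 0.75]
for a, b in duplicates:
    print(f"DUPLICATE: {a!r} <-> {b!r}")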
from typing import Union
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(text=response.text, raw=raw)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
return ChatResponse(message=ChatMessage(role=role, content=response.text), raw=raw)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
content_txt = ""
for block in message.blocks:
if isinstance(block, TextBlock):
parts.append(block.text)
elif isinstance(block, ImageBlock):
base64_bytes = block.resolve_image(as_base64=False).read()
parts.append(
{
"mime_type": block.image_mimetype,
"data": base64_bytes,
}
)
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
|
from typing import Union
import google.ai.generativelanguage as glm
import google.generativeai as genai
import PIL
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)),
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)),
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(text=response.text, raw=raw)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)),
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)),
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
return ChatResponse(message=ChatMessage(role=role, content=response.text), raw=raw)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = [message.content]
if images := message.additional_kwargs.get("images"):
parts += [PIL.Image.open(doc.resolve_image()) for doc in images]
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
|
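A hedged usage sketch for the `chat_message_to_gemini` converter above: turn a short llama-index chat history into Gemini content dicts and, optionally, feed it to a model. The model name and prompt text are assumptions, and the commented-out call requires valid Google API credentials.
from llama_index.core.base.llms.types import ChatMessage, MessageRole

history = [
    chat_message_to_gemini(ChatMessage(role=MessageRole.USER, content="Hello!")),
    chat_message_to_gemini(ChatMessage(role=MessageRole.ASSISTANT, content="Hi! How can I help?")),
]
# model = genai.GenerativeModel("gemini-pro")  # assumed model name
# response = model.generate_content(history)
# chat = chat_from_gemini_response(response)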
import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray
def test_tensor_ops():
class A(BaseDoc):
tensor: NdArray[3, 224, 224]
class B(BaseDoc):
tensor: NdArray[3, 112, 224]
tensor = A(tensor=np.ones((3, 224, 224))).tensor
tensord = A(tensor=np.ones((3, 224, 224))).tensor
tensorn = np.zeros((3, 224, 224))
tensorhalf = B(tensor=np.ones((3, 112, 224))).tensor
tensorfull = np.concatenate([tensorhalf, tensorhalf], axis=1)
assert type(tensor) == NdArray
assert type(tensor + tensord) == NdArray
assert type(tensor + tensorn) == NdArray
assert type(tensor + tensorfull) == NdArray
|
import numpy as np
from docarray import BaseDocument
from docarray.typing import NdArray
def test_tensor_ops():
class A(BaseDocument):
tensor: NdArray[3, 224, 224]
class B(BaseDocument):
tensor: NdArray[3, 112, 224]
tensor = A(tensor=np.ones((3, 224, 224))).tensor
tensord = A(tensor=np.ones((3, 224, 224))).tensor
tensorn = np.zeros((3, 224, 224))
tensorhalf = B(tensor=np.ones((3, 112, 224))).tensor
tensorfull = np.concatenate([tensorhalf, tensorhalf], axis=1)
assert type(tensor) == NdArray
assert type(tensor + tensord) == NdArray
assert type(tensor + tensorn) == NdArray
assert type(tensor + tensorfull) == NdArray
|
_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
|
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
|
from typing import List
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDoc, DocList
from docarray.base_doc import DocArrayResponse
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDoc):
img: ImageDoc
text: TextDoc
title: str
input_doc = Mmdoc(
img=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(), title='hello'
)
app = FastAPI()
@app.post("/doc/", response_model=Mmdoc, response_class=DocArrayResponse)
async def create_item(doc: Mmdoc) -> Mmdoc:
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocArrayResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
doc = OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDoc):
text: str
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocArrayResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_docarray():
doc = ImageDoc(tensor=np.zeros((3, 224, 224)))
docs = DocList[ImageDoc]([doc, doc])
app = FastAPI()
@app.post("/doc/", response_class=DocArrayResponse)
async def func(fastapi_docs: List[ImageDoc]) -> List[ImageDoc]:
docarray_docs = DocList[ImageDoc].construct(fastapi_docs)
return list(docarray_docs)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=docs.to_json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
docs = DocList[ImageDoc].from_json(response.content.decode())
assert len(docs) == 2
assert docs[0].tensor.shape == (3, 224, 224)
|
from typing import List
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDoc, DocArray
from docarray.base_doc import DocArrayResponse
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDoc):
img: ImageDoc
text: TextDoc
title: str
input_doc = Mmdoc(
img=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(), title='hello'
)
app = FastAPI()
@app.post("/doc/", response_model=Mmdoc, response_class=DocArrayResponse)
async def create_item(doc: Mmdoc) -> Mmdoc:
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocArrayResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
doc = OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDoc):
text: str
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocArrayResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_docarray():
doc = ImageDoc(tensor=np.zeros((3, 224, 224)))
docs = DocArray[ImageDoc]([doc, doc])
app = FastAPI()
@app.post("/doc/", response_class=DocArrayResponse)
async def func(fastapi_docs: List[ImageDoc]) -> List[ImageDoc]:
docarray_docs = DocArray[ImageDoc].construct(fastapi_docs)
return list(docarray_docs)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=docs.to_json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
docs = DocArray[ImageDoc].from_json(response.content.decode())
assert len(docs) == 2
assert docs[0].tensor.shape == (3, 224, 224)
|
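Outside of pytest, any of the FastAPI apps defined in these tests can be served directly; a minimal sketch using uvicorn is below. The endpoint, host, and port are arbitrary assumptions for illustration.
import uvicorn
from fastapi import FastAPI
from docarray.base_doc import DocArrayResponse
from docarray.documents import ImageDoc

app = FastAPI()

@app.post("/doc/", response_model=ImageDoc, response_class=DocArrayResponse)
async def echo(doc: ImageDoc) -> ImageDoc:
    return doc  # echo the parsed ImageDoc back to the client

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8080)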
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from sentence_encoder import TransformerSentenceEncoder
_EMBEDDING_DIM = 384
@pytest.fixture(scope='session')
def basic_encoder() -> TransformerSentenceEncoder:
return TransformerSentenceEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'TransformerSentenceEncoder'
def test_encoding_cpu():
enc = TransformerSentenceEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = TransformerSentenceEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name, emb_dim',
[
('sentence-transformers/multi-qa-MiniLM-L6-cos-v1', 384),
('sentence-transformers/msmarco-distilbert-base-tas-b', 768),
('distilbert-base-uncased', 768),
],
)
def test_models(model_name: str, emb_dim: int):
encoder = TransformerSentenceEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (emb_dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TransformerSentenceEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TransformerSentenceEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: TransformerSentenceEncoder):
docs = DocumentArray(
[
Document(id='A', text='a furry animal that with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...sentence_encoder import TransformerSentenceEncoder
_EMBEDDING_DIM = 384
@pytest.fixture(scope='session')
def basic_encoder() -> TransformerSentenceEncoder:
return TransformerSentenceEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'TransformerSentenceEncoder'
def test_encoding_cpu():
enc = TransformerSentenceEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = TransformerSentenceEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name, emb_dim',
[
('sentence-transformers/multi-qa-MiniLM-L6-cos-v1', 384),
('sentence-transformers/msmarco-distilbert-base-tas-b', 768),
('distilbert-base-uncased', 768),
],
)
def test_models(model_name: str, emb_dim: int):
encoder = TransformerSentenceEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (emb_dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TransformerSentenceEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TransformerSentenceEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: TransformerSentenceEncoder):
docs = DocumentArray(
[
Document(id='A', text='a furry animal that with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|