input | output
---|---|
import os
import sysconfig
from typing import Optional
from torch.utils._triton import has_triton
def enable_triton(lib_dir: Optional[str] = None) -> dict[str, str]:
"""
Enable NVSHMEM device functions for Triton. Performs an NVSHMEM
device-side initialization on the kernel module created by Triton.
Args:
lib_dir (Optional[str]): The directory where the NVSHMEM device library
is located. If not provided, it defaults to the path where the NVSHMEM
wheel is installed.
Returns:
dict[str, str]: A dictionary containing the NVSHMEM device library name
and path.
"""
from triton.runtime.jit import JITFunction
from torch._C._distributed_c10d import _nvshmemx_cumodule_init
# Detect NVSHMEM device library path from python library path
if lib_dir is None:
py_lib_path = sysconfig.get_path("purelib")
lib_dir = os.path.join(py_lib_path, "nvidia", "nvshmem", "lib")
lib_path = os.path.join(lib_dir, "libnvshmem_device.bc")
if not os.path.exists(lib_path):
raise RuntimeError(f"NVSHMEM device library not found at {lib_path}")
extern_libs = {"libnvshmem_device": lib_path}
# A hook function to initialize NVSHMEM in Triton
def nvshmem_init_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
key = kwargs["key"]
device = kwargs["compile"]["device"]
jit_function = kwargs["fn"].jit_function
kernel_cache, _, _, _ = jit_function.device_caches[device]
kernel = kernel_cache.get(key, None)
kernel.run  # accessing .run makes Triton's CompiledKernel load the cubin, so kernel.module is valid
_nvshmemx_cumodule_init(kernel.module)
# Register the function as a post-compile hook
JITFunction.compiled_hook = nvshmem_init_hook
# Return to user so that they can use it in Triton kernel invocation
return extern_libs
if has_triton():
from triton.language import core
@core.extern
def putmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def getmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_getmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def putmem_signal_block( # type: ignore[no-untyped-def]
dst,
src,
nelems,
sig_addr,
signal,
sig_op,
pe,
_builder=None,
): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, sig_addr, signal, sig_op, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_signal_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def wait_until(ivar, cmp, cmp_val, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[ivar, cmp, cmp_val],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmem_longlong_wait_until", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def signal_wait_until(sig_addr, cmp, cmp_val, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[sig_addr, cmp, cmp_val],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmem_signal_wait_until", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
|
import os
import sysconfig
from typing import Optional
from torch.utils._triton import has_triton
def enable_triton(lib_dir: Optional[str] = None) -> dict[str, str]:
"""
Enable NVSHMEM device functions for Triton. Performs an NVSHMEM
device-side initialization on the kernel module created by Triton.
Args:
lib_dir (Optional[str]): The directory where the NVSHMEM device library
is located. If not provided, it defaults to the path where the NVSHMEM
wheel is installed.
Returns:
dict[str, str]: A dictionary containing the NVSHMEM device library name
and path.
"""
from triton.runtime.jit import JITFunction
from torch._C._distributed_c10d import _nvshmemx_cumodule_init
# Detect NVSHMEM device library path from python library path
if lib_dir is None:
py_lib_path = sysconfig.get_path("purelib")
lib_dir = os.path.join(py_lib_path, "nvidia", "nvshmem", "lib")
lib_path = os.path.join(lib_dir, "libnvshmem_device.bc")
if not os.path.exists(lib_path):
raise RuntimeError(f"NVSHMEM device library not found at {lib_path}")
extern_libs = {"libnvshmem_device": lib_path}
# A hook function to initialize NVSHMEM in Triton
def nvshmem_init_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
key = kwargs["key"]
device = kwargs["compile"]["device"]
jit_function = kwargs["fn"].jit_function
kernel_cache, _, _, _ = jit_function.device_caches[device]
kernel = kernel_cache.get(key, None)
kernel.run  # accessing .run makes Triton's CompiledKernel load the cubin, so kernel.module is valid
_nvshmemx_cumodule_init(kernel.module)
# Register the function as a post-compile hook
JITFunction.compiled_hook = nvshmem_init_hook
# Return to user so that they can use it in Triton kernel invocation
return extern_libs
if has_triton():
from triton.language import core
@core.extern
def putmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def getmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_getmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def putmem_signal_block( # type: ignore[no-untyped-def]
dst,
src,
nelems,
sig_addr,
signal,
sig_op,
pe,
_builder=None,
): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, sig_addr, signal, sig_op, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_signal_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def wait_until(ivar, cmp, cmp_val, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[ivar, cmp, cmp_val],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmem_longlong_wait_until", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
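# --- Usage sketch (illustrative; not part of the module above). Assumes a
# multi-rank NVSHMEM job where `dst` and `src` are symmetric-memory addresses
# obtained elsewhere, and a Triton version that accepts `extern_libs` at launch.
import triton

extern_libs = enable_triton()  # also registers the post-compile init hook

@triton.jit
def put_kernel(dst_ptr, src_ptr, nbytes, peer):
    # One cooperative block pushes nbytes from the local buffer to the peer.
    putmem_block(dst_ptr, src_ptr, nbytes, peer)

# Forward extern_libs at launch so Triton links libnvshmem_device.bc, e.g.:
# put_kernel[(1,)](dst, src, nbytes, peer, extern_libs=extern_libs)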
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDoc):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDoc):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
MultiDataAspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_data_sampler import MultiDataSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler', 'MultiDataSampler',
'MultiDataAspectRatioBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler'
]
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an image tensor.
Adds image-specific features to the tensor.
For instance, the ability to convert the tensor back to image bytes, which
are optimized for sending over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an image tensor.
Adds image-specific features to the tensor.
For instance, the ability to convert the tensor back to image bytes, which
are optimized for sending over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: Dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: Dict[str, Tensor]):
features['sentence_embedding'] = self.norm(features['sentence_embedding'])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump({'dimension': self.dimension}, fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
model.load_state_dict(torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu')))
return model
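# --- Round-trip sketch for the module above (path is illustrative):
import os
import torch

norm = LayerNorm(dimension=384)
features = {"sentence_embedding": torch.randn(8, 384)}
out = norm(features)["sentence_embedding"]  # normalized, shape (8, 384)

save_dir = "/tmp/layer_norm_module"
os.makedirs(save_dir, exist_ok=True)
norm.save(save_dir)  # writes config.json and pytorch_model.bin
restored = LayerNorm.load(save_dir)
assert restored.get_sentence_embedding_dimension() == 384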
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the probability of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
f"`factor` should be between 0 and 1. Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
# Base case: Unbatched data
batch_size = 1
if len(images.shape) == 4:
# This is a batch of images (4D input)
batch_size = self.backend.core.shape(images)[0]
random_values = self.backend.random.uniform(
shape=(batch_size,),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformation, training=True):
if training:
should_apply = (
transformation
if transformation is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(
should_apply, grayscale_images, images
)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the probability of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
f"`factor` should be between 0 and 1. Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformation, training=True):
if training:
should_apply = (
transformation
if transformation is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(
should_apply, grayscale_images, images
)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
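# --- Usage sketch (assumes a Keras 3 build that ships RandomGrayscale;
# numbers are illustrative):
import numpy as np
from keras import layers

layer = layers.RandomGrayscale(factor=0.5, seed=0)
images = np.random.uniform(0, 255, size=(4, 32, 32, 3)).astype("float32")
augmented = layer(images, training=True)
# Roughly half the batch is converted; shape and channel count are preserved.
assert tuple(augmented.shape) == images.shape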
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_mobilenet_v1 import *
from .feature_extraction_mobilenet_v1 import *
from .image_processing_mobilenet_v1 import *
from .image_processing_mobilenet_v1_fast import *
from .modeling_mobilenet_v1 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_mobilenet_v1 import *
from .feature_extraction_mobilenet_v1 import *
from .image_processing_mobilenet_v1 import *
from .modeling_mobilenet_v1 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
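# --- Sketch of the lazy-import behavior this sets up. Importing the package
# is cheap; the _LazyModule placed in sys.modules only imports a submodule
# (e.g. configuration_mobilenet_v1) when one of its attributes is first
# accessed:
import transformers.models.mobilenet_v1 as mobilenet_v1

config_cls = mobilenet_v1.MobileNetV1Config  # triggers the real import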
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .language_models import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .mot import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .reid import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
from .trackers import * # noqa: F401,F403
from .tracking_heads import * # noqa: F401,F403
from .vis import * # noqa: F401,F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .mot import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .reid import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
from .trackers import * # noqa: F401,F403
from .tracking_heads import * # noqa: F401,F403
from .vis import * # noqa: F401,F403
|
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, List, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""
Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
# Extract function calls and placeholders
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
parsed_inputs = []
if inputs.strip(): # Ensure inputs string is not empty
input_parts = [part.strip() for part in inputs.split(",")]
for part in input_parts:
try:
# Try to parse as a JSON literal (e.g., number, bool)
parsed_inputs.append(json.loads(part))
except json.JSONDecodeError:
# If it fails, treat it as a bare string/placeholder
parsed_inputs.append(part)
graph.add_node(output, func_name=func_name, inputs=parsed_inputs)
for inp in parsed_inputs:
# Add an edge only if the input is a placeholder from a previous step
if isinstance(inp, str) and inp in placeholders:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
# look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
tool_output = await tools_by_name[func_name].acall(*input_values)
tool_outputs.append(tool_output)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = tool_output.raw_output
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
solution = solution.replace(placeholder, '"' + str(value) + '"')
return solution, tool_outputs
|
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, List, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""
Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
# Extract function calls and placeholders
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
inputs = json.loads("[" + inputs + "]")
graph.add_node(output, func_name=func_name, inputs=inputs)
for inp in inputs:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
# look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
tool_output = await tools_by_name[func_name].acall(*input_values)
tool_outputs.append(tool_output)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = tool_output.raw_output
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
solution = solution.replace(placeholder, '"' + str(value) + '"')
return solution, tool_outputs
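# --- Sketch of the [FUNC ...] format the parser consumes (tool names and the
# solution text are illustrative; execution needs real AsyncBaseTool objects):
solution = (
    "The sum is [FUNC add(3, 4) = y1] and doubling it "
    "gives [FUNC multiply(y1, 2) = y2]."
)
# The regex extracts add(3, 4) -> y1 and multiply(y1, 2) -> y2, the graph
# records the y1 -> y2 dependency, levels run in topological order, and the
# raw outputs are substituted back into the text:
# parsed, tool_outputs = ChainOfAbstractionParser().parse(solution, tools_by_name)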
|
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import TorchTensor
from docarray.utils._internal._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDoc)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
The preprocessing dictionary passed to the constructor consists of keys that are
field names and values that are functions that take a single argument and return
a single argument.
---
```python
from torch.utils.data import DataLoader
from docarray import DocList
from docarray.data import MultiModalDataset
from docarray.documents import TextDoc
def prepend_number(text: str):
return f"Number {text}"
docs = DocList[TextDoc](TextDoc(text=str(i)) for i in range(16))
ds = MultiModalDataset[TextDoc](docs, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[TextDoc].collate_fn)
for batch in loader:
print(batch.text)
```
---
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
Transformations that operate on reference types (such as Documents) can optionally
not return a value.
The transformations will be applied according to their order in the dictionary.
---
```python
import torch
from torch.utils.data import DataLoader
from docarray import DocList, BaseDoc
from docarray.data import MultiModalDataset
from docarray.documents import TextDoc
class Thesis(BaseDoc):
title: TextDoc
class Student(BaseDoc):
thesis: Thesis
def embed_title(title: TextDoc):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
docs = DocList[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
docs,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
```
---
:param docs: the `DocList` to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
"""
doc_type: Optional[Type[BaseDoc]] = None
__typed_ds__: Dict[Type[BaseDoc], Type['MultiModalDataset']] = {}
def __init__(
self, docs: 'DocList[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.docs = docs
self._preprocessing = preprocessing
def __len__(self):
return len(self.docs)
def __getitem__(self, item: int):
doc = self.docs[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.doc_type
if doc_type:
batch_da = DocVec[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocVec(batch, tensor_type=TorchTensor)
return batch_da
@classmethod
def __class_getitem__(cls, item: Type[BaseDoc]) -> Type['MultiModalDataset']:
if not issubclass(item, BaseDoc):
raise ValueError(
f'{cls.__name__}[item] item should be a BaseDoc subclass, not {item}'
)
if item not in cls.__typed_ds__:
global _TypedDataset
class _TypedDataset(cls): # type: ignore
doc_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import TorchTensor
from docarray.utils._internal._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDoc)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
:param docs: the DocList to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
The preprocessing dictionary passed to the constructor consists of keys that are
field names and values that are functions that take a single argument and return
a single argument.
EXAMPLE USAGE
.. code-block:: python
from torch.utils.data import DataLoader
from docarray import DocList
from docarray.data import MultiModalDataset
from docarray.documents import Text
def prepend_number(text: str):
return f"Number {text}"
docs = DocList[Text](Text(text=str(i)) for i in range(16))
ds = MultiModalDataset[Text](docs, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[Text].collate_fn)
for batch in loader:
print(batch.text)
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
Transformations that operate on reference types (such as Documents) can optionally
not return a value.
The transformations will be applied according to their order in the dictionary.
EXAMPLE USAGE
.. code-block:: python
import torch
from torch.utils.data import DataLoader
from docarray import DocList, BaseDoc
from docarray.data import MultiModalDataset
from docarray.documents import Text
class Thesis(BaseDoc):
title: Text
class Student(BaseDoc):
thesis: Thesis
def embed_title(title: Text):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
docs = DocList[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
docs,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
"""
doc_type: Optional[Type[BaseDoc]] = None
__typed_ds__: Dict[Type[BaseDoc], Type['MultiModalDataset']] = {}
def __init__(
self, docs: 'DocList[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.docs = docs
self._preprocessing = preprocessing
def __len__(self):
return len(self.docs)
def __getitem__(self, item: int):
doc = self.docs[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.doc_type
if doc_type:
batch_da = DocVec[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocVec(batch, tensor_type=TorchTensor)
return batch_da
@classmethod
def __class_getitem__(cls, item: Type[BaseDoc]) -> Type['MultiModalDataset']:
if not issubclass(item, BaseDoc):
raise ValueError(
f'{cls.__name__}[item] item should be a BaseDoc subclass, not {item}'
)
if item not in cls.__typed_ds__:
global _TypedDataset
class _TypedDataset(cls): # type: ignore
doc_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if torchaudio._extension._SOX_INITIALIZED:
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
"""Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def _is_backend_dispatcher_enabled() -> bool:
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER", default="1") == "1"
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("list_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if torchaudio._extension._SOX_INITIALIZED:
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("set_audio_backend is a no-op when the I/O backend dispatcher is enabled.")
return
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("get_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
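# --- Usage sketch (available names depend on the installed extensions; on
# newer torchaudio the dispatcher turns these calls into warn-only no-ops):
available = list_audio_backends()  # e.g. ["soundfile", "sox_io"]
if "soundfile" in available:
    set_audio_backend("soundfile")  # torchaudio.load/save/info now use soundfile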
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this appears to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.20.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. Useful when running matplotlib/seaborn with
many parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the
macOS El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this appears to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.20.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. Useful when running matplotlib/seaborn with
many parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the
macOS El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from docarray import Document, DocumentArray
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('deleted_elmnts', [[0, 1], ['r0', 'r1']])
def test_delete_offset_success_sync_es_offset_index(deleted_elmnts, start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_delete_offset_success_sync_es_offset_index',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[5, 5, 5]),
Document(id='r6', embedding=[6, 6, 6]),
Document(id='r7', embedding=[7, 7, 7]),
]
)
expected_offset_after_del = ['r2', 'r3', 'r4', 'r5', 'r6', 'r7']
with elastic_doc:
del elastic_doc[deleted_elmnts]
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc._offset2ids.ids) == indexed_offset_count
assert len(elastic_doc._offset2ids.ids) == 6
assert len(elastic_doc[:, 'embedding']) == 6
for offset, doc_id in enumerate(expected_offset_after_del):
expected_offset = str(offset)
actual_offset_index = elastic_doc._client.search(
index=elastic_doc._index_name_offset2id, query={'match': {'blob': doc_id}}
)['hits']['hits'][0]['_id']
assert actual_offset_index == expected_offset
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_success_handle_bulk_delete_not_found(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_bulk_delete_not_found',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
]
)
offset_index = elastic_doc._index_name_offset2id
expected_to_be_fail_del_data = [
{
'_op_type': 'delete',
'_id': 0, # offset data exist
'_index': offset_index,
},
{
'_op_type': 'delete',
'_id': 2, # offset data not exist, expect to fail
'_index': offset_index,
},
]
info = elastic_doc._send_requests(expected_to_be_fail_del_data)
assert len(info) == 1
assert 'delete' in info[0].keys()
|
from docarray import Document, DocumentArray
import pytest
@pytest.mark.parametrize('deleted_elmnts', [[0, 1], ['r0', 'r1']])
def test_delete_offset_success_sync_es_offset_index(deleted_elmnts, start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_delete_offset_success_sync_es_offset_index',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[5, 5, 5]),
Document(id='r6', embedding=[6, 6, 6]),
Document(id='r7', embedding=[7, 7, 7]),
]
)
expected_offset_after_del = ['r2', 'r3', 'r4', 'r5', 'r6', 'r7']
with elastic_doc:
del elastic_doc[deleted_elmnts]
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc._offset2ids.ids) == indexed_offset_count
assert len(elastic_doc._offset2ids.ids) == 6
assert len(elastic_doc[:, 'embedding']) == 6
for id in expected_offset_after_del:
expected_offset = str(expected_offset_after_del.index(id))
actual_offset_index = elastic_doc._client.search(
index=elastic_doc._index_name_offset2id, query={'match': {'blob': id}}
)['hits']['hits'][0]['_id']
assert actual_offset_index == expected_offset
|
"""Tool for the Wolfram Alpha API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaQueryRun(BaseTool):
"""Tool that queries using the Wolfram Alpha SDK."""
name: str = "wolfram_alpha"
description: str = (
"A wrapper around Wolfram Alpha. "
"Useful for when you need to answer questions about Math, "
"Science, Technology, Culture, Society and Everyday Life. "
"Input should be a search query."
)
api_wrapper: WolframAlphaAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the WolframAlpha tool."""
return self.api_wrapper.run(query)
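# A hedged usage sketch (not part of the original module): wire the tool to the
# API wrapper and run a query. WolframAlphaAPIWrapper reads the
# WOLFRAM_ALPHA_APPID environment variable, so a valid app id is assumed here.
if __name__ == "__main__":
    wrapper = WolframAlphaAPIWrapper()
    tool = WolframAlphaQueryRun(api_wrapper=wrapper)
    print(tool.run("What is the square root of 144?"))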
|
"""Tool for the Wolfram Alpha API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries using the Wolfram Alpha SDK."""
name: str = "wolfram_alpha"
description: str = (
"A wrapper around Wolfram Alpha. "
"Useful for when you need to answer questions about Math, "
"Science, Technology, Culture, Society and Everyday Life. "
"Input should be a search query."
)
api_wrapper: WolframAlphaAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the WolframAlpha tool."""
return self.api_wrapper.run(query)
|
import contextlib
import os
import shutil
import time
from jina import DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.py')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(2.0)
def test_reload_simple_executor(tmpdir):
from tests.integration.hot_reload.exec1.my_executor1 import MyExecutorToReload1
f = Flow().add(uses=MyExecutorToReload1, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(cur_dir, 'my_executor_1_new.py'),
os.path.join(cur_dir, 'exec1/my_executor1.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_with_dynamic_batching(tmpdir):
from tests.integration.hot_reload.exec1.my_executor1 import MyExecutorToReload1
f = Flow().add(
uses=MyExecutorToReload1,
reload=True,
uses_dynamic_batching={'/bar': {'preferred_batch_size': 1, 'timeout': 1000}},
)
with f:
res = f.post(on='/bar', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReloadBar'
with _update_file(
os.path.join(cur_dir, 'my_executor_1_new.py'),
os.path.join(cur_dir, 'exec1/my_executor1.py'),
str(tmpdir),
):
res = f.post(on='/bar', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReloadBar'
res = f.post(on='/bar', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReloadBar'
def test_reload_helper(tmpdir):
from tests.integration.hot_reload.exec2.my_executor2 import MyExecutorToReload2
f = Flow().add(uses=MyExecutorToReload2, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(cur_dir, 'helper2.py'),
os.path.join(cur_dir, 'exec2/helper.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_with_inheritance(tmpdir):
from tests.integration.hot_reload.exec3.my_executor3 import A, EnhancedExecutor
f = Flow().add(uses=A, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
with _update_file(
os.path.join(cur_dir, 'my_executor_3_new.py'),
os.path.join(cur_dir, 'exec3/my_executor3.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'AAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
|
import contextlib
import os
import shutil
import time
from jina import DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.py')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(2.0)
def test_reload_simple_executor(tmpdir):
from tests.integration.hot_reload.exec1.my_executor1 import MyExecutorToReload1
f = Flow().add(uses=MyExecutorToReload1, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(cur_dir, 'my_executor_1_new.py'),
os.path.join(cur_dir, 'exec1/my_executor1.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_helper(tmpdir):
from tests.integration.hot_reload.exec2.my_executor2 import MyExecutorToReload2
f = Flow().add(uses=MyExecutorToReload2, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(cur_dir, 'helper2.py'),
os.path.join(cur_dir, 'exec2/helper.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_with_inheritance(tmpdir):
from tests.integration.hot_reload.exec3.my_executor3 import A, EnhancedExecutor
f = Flow().add(uses=A, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
with _update_file(
os.path.join(cur_dir, 'my_executor_3_new.py'),
os.path.join(cur_dir, 'exec3/my_executor3.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'AAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
|
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
import torch
from mmengine.utils import TORCH_VERSION, digit_version
@contextmanager
def autocast(enabled: bool = True, **kwargs):
"""A wrapper of ``torch.autocast`` and ``toch.cuda.amp.autocast``.
Pytorch 1.6.0 provide ``torch.cuda.amp.autocast`` for running in
mixed precision , and update it to ``torch.autocast`` in 1.10.0.
Both interfaces have different arguments, and ``torch.autocast``
support running with cpu additionally.
This function provides a unified interface by wrapping
``torch.autocast`` and ``torch.cuda.amp.autocast``, which resolves the
compatibility issues that ``torch.cuda.amp.autocast`` does not support
running mixed precision with cpu, and both contexts have different
arguments. We suggest users using this function in the code
to achieve maximized compatibility of different PyTorch versions.
Note:
``autocast`` requires pytorch version >= 1.5.0. If pytorch version
<= 1.10.0 and cuda is not available, it will raise an error with
``enabled=True``, since ``torch.cuda.amp.autocast`` only support cuda
mode.
Examples:
>>> # case1: 1.10 > Pytorch version >= 1.5.0
>>> with autocast():
>>> # run in mixed precision context
>>> pass
        >>> with autocast(device_type='cpu'):
>>> # raise error, torch.cuda.amp.autocast only support cuda mode.
>>> pass
>>> # case2: Pytorch version >= 1.10.0
>>> with autocast():
>>> # default cuda mixed precision context
>>> pass
>>> with autocast(device_type='cpu'):
>>> # cpu mixed precision context
>>> pass
>>> with autocast(
>>> device_type='cuda', enabled=True, cache_enabled=True):
>>> # enable precision context with more specific arguments.
>>> pass
Args:
enabled (bool): Whether autocasting should be enabled in the region.
Defaults to True.
kwargs (dict): Arguments of torch.autocast except for ``enabled``.
"""
    # If `enabled` is False, this context manager is a no-op and all
    # calculations are performed in fp32.
assert digit_version(TORCH_VERSION) >= digit_version('1.5.0'), (
'The minimum pytorch version requirements of mmengine is 1.5.0, but '
f'got {TORCH_VERSION}')
if (digit_version('1.5.0') <= digit_version(TORCH_VERSION) <
digit_version('1.10.0')):
# If pytorch version is between 1.5.0 and 1.10.0, the default value of
# dtype for `torch.cuda.amp.autocast` is torch.float16.
assert not kwargs, (
            f'autocast under pytorch {TORCH_VERSION} only accepts the '
            '`enabled` argument.')
if torch.cuda.is_available():
with torch.cuda.amp.autocast(enabled=enabled):
yield
else:
if not enabled:
yield
else:
raise RuntimeError(
                    'If the pytorch version is between 1.5.0 and 1.10, '
                    '`autocast` is only available in gpu mode')
elif (digit_version('1.11.0') > digit_version(TORCH_VERSION) >=
digit_version('1.10.0')):
if torch.cuda.is_available():
kwargs.setdefault('device_type', 'cuda')
else:
kwargs.setdefault('device_type', 'cpu')
# torch.autocast only support `dtype=torch.bfloat16` in
# pytorch 1.10
kwargs.setdefault('dtype', torch.bfloat16)
with torch.autocast(enabled=enabled, **kwargs):
yield
elif digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
if torch.cuda.is_available():
kwargs.setdefault('device_type', 'cuda')
else:
kwargs.setdefault('device_type', 'cpu')
with torch.autocast(enabled=enabled, **kwargs):
yield
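# A hedged usage sketch (not part of the original module): a matmul under the
# unified wrapper. `enabled` is gated on cuda availability so the pre-1.10 cpu
# branch above does not raise; the tensor shape is illustrative only.
if __name__ == '__main__':
    x = torch.randn(8, 8)
    with autocast(enabled=torch.cuda.is_available()):
        y = x @ x  # runs in mixed precision only when autocast is active
    print(y.dtype)  # cpu tensors stay float32; move x to cuda to see autocast take effect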
|
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
import torch
from mmengine.utils import TORCH_VERSION, digit_version
@contextmanager
def autocast(enabled: bool = True, **kwargs):
"""A wrapper of ``torch.autocast`` and ``toch.cuda.amp.autocast``.
Pytorch 1.6.0 provide ``torch.cuda.amp.autocast`` for running in
mixed precision , and update it to ``torch.autocast`` in 1.10.0.
Both interfaces have different arguments, and ``torch.autocast``
support running with cpu additionally.
This function provides a unified interface by wrapping
``torch.autocast`` and ``torch.cuda.amp.autocast``, which resolves the
compatibility issues that ``torch.cuda.amp.autocast`` does not support
running mixed precision with cpu, and both contexts have different
arguments. We suggest users using this function in the code
to achieve maximized compatibility of different PyTorch versions.
Note:
``autocast`` requires pytorch version >= 1.5.0. If pytorch version
<= 1.10.0 and cuda is not available, it will raise an error with
``enabled=True``, since ``torch.cuda.amp.autocast`` only support cuda
mode.
Examples:
>>> # case1: 1.10 > Pytorch version >= 1.5.0
>>> with autocast():
>>> # run in mixed precision context
>>> pass
        >>> with autocast(device_type='cpu'):
>>> # raise error, torch.cuda.amp.autocast only support cuda mode.
>>> pass
>>> # case2: Pytorch version >= 1.10.0
>>> with autocast():
>>> # default cuda mixed precision context
>>> pass
>>> with autocast(device_type='cpu'):
>>> # cpu mixed precision context
>>> pass
>>> with autocast(
>>> device_type='cuda', enabled=True, cache_enabled=True):
>>> # enable precision context with more specific arguments.
>>> pass
Args:
enabled (bool): Whether autocasting should be enabled in the region.
Defaults to True.
kwargs (dict): Arguments of torch.autocast except for ``enabled``.
"""
    # If `enabled` is False, this context manager is a no-op and all
    # calculations are performed in fp32.
assert digit_version(TORCH_VERSION) >= digit_version('1.5.0'), (
'The minimum pytorch version requirements of mmengine is 1.5.0, but '
f'got {TORCH_VERSION}')
if (digit_version('1.5.0') <= digit_version(TORCH_VERSION) <
digit_version('1.10.0')):
# If pytorch version is between 1.5.0 and 1.10.0, the default value of
# dtype for `torch.cuda.amp.autocast` is torch.float16.
assert not kwargs, (
            f'autocast under pytorch {TORCH_VERSION} only accepts the '
            '`enabled` argument.')
if torch.cuda.is_available():
with torch.cuda.amp.autocast(enabled=enabled):
yield
else:
if not enabled:
yield
else:
raise RuntimeError(
                    'If the pytorch version is between 1.5.0 and 1.10, '
                    '`autocast` is only available in gpu mode')
elif digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
if torch.cuda.is_available():
kwargs.setdefault('device_type', 'cuda')
else:
kwargs.setdefault('device_type', 'cpu')
with torch.autocast(enabled=enabled, **kwargs):
yield
|
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
from llama_index.tools.mcp.utils import workflow_as_mcp, get_tools_from_mcp_url, aget_tools_from_mcp_url
__all__ = [
"McpToolSpec",
"BasicMCPClient",
"workflow_as_mcp",
"get_tools_from_mcp_url",
"aget_tools_from_mcp_url"
]
|
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
__all__ = ["McpToolSpec", "BasicMCPClient"]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=True,
seg_pad_value=255),
semantic_head=dict(
type='PanopticFPNHead',
num_things_classes=80,
num_stuff_classes=53,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
rcnn=dict(
score_thr=0.6,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
max_per_img=100,
mask_thr_binary=0.5),
# used in HeuristicFusionHead
panoptic=dict(mask_overlap=0.5, stuff_area_limit=4096)))
# Forced to remove NumClassCheckHook
custom_hooks = []
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=True,
seg_pad_value=255),
semantic_head=dict(
type='PanopticFPNHead',
num_things_classes=80,
num_stuff_classes=53,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
rcnn=dict(
score_thr=0.6,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
max_per_img=100,
mask_thr_binary=0.5),
# used in HeuristicFusionHead
panoptic=dict(mask_overlap=0.5, stuff_area_limit=4096)))
# Forced to remove NumClassCheckHook
custom_hooks = []
|
import pathlib
from typing import Any, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> list[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: tuple[str, Any]) -> dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> list[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
|
import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: str,
metric_name: str,
tolerance: float = 1e-02,
) -> None:
"""Internal method that trains the dataset using the rank objective on GPU and CPU,
evaluates the metric and determines if the delta between the metric is within the
tolerance level.
"""
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
params = {
"booster": "gbtree",
"tree_method": "gpu_hist",
"gpu_id": 0,
}
num_trees = 100
check_metric_improvement_rounds = 10
evals_result: Dict[str, Dict] = {}
params["objective"] = rank_objective
params["eval_metric"] = metric_name
bst = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
gpu_scores = evals_result["train"][metric_name][-1]
evals_result = {}
cpu_params = {
"booster": "gbtree",
"tree_method": "hist",
"gpu_id": -1,
}
cpu_params["objective"] = rank_objective
cpu_params["eval_metric"] = metric_name
bstc = xgboost.train(
cpu_params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
cpu_scores = evals_result["train"][metric_name][-1]
info = (rank_objective, metric_name)
assert np.allclose(gpu_scores, cpu_scores, tolerance, tolerance), info
assert np.allclose(bst.best_score, bstc.best_score, tolerance, tolerance), info
evals_result_weighted: Dict[str, Dict] = {}
dtest.set_weight(np.ones((dtest.get_group().size,)))
dtrain.set_weight(np.ones((dtrain.get_group().size,)))
watchlist = [(dtest, "eval"), (dtrain, "train")]
bst_w = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result_weighted,
)
weighted_metric = evals_result_weighted["train"][metric_name][-1]
tolerance = 1e-5
assert np.allclose(bst_w.best_score, bst.best_score, tolerance, tolerance)
assert np.allclose(weighted_metric, gpu_scores, tolerance, tolerance)
@pytest.mark.parametrize(
"objective,metric",
[
("rank:pairwise", "auc"),
("rank:pairwise", "ndcg"),
("rank:pairwise", "map"),
("rank:ndcg", "auc"),
("rank:ndcg", "ndcg"),
("rank:ndcg", "map"),
("rank:map", "auc"),
("rank:map", "ndcg"),
("rank:map", "map"),
],
)
def test_with_mq2008(objective, metric) -> None:
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
    ) = tm.data.get_mq2008(os.path.join(tm.demo_dir(__file__), "rank"))
if metric.find("map") != -1 or objective.find("map") != -1:
y_train[y_train <= 1] = 0.0
y_train[y_train > 1] = 1.0
y_test[y_test <= 1] = 0.0
y_test[y_test > 1] = 1.0
dtrain = xgboost.DMatrix(x_train, y_train, qid=qid_train)
dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)
comp_training_with_rank_objective(dtrain, dtest, objective, metric)
def test_normalization() -> None:
run_normalization("cuda")
|
import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: str,
metric_name: str,
tolerance: float = 1e-02,
) -> None:
"""Internal method that trains the dataset using the rank objective on GPU and CPU,
evaluates the metric and determines if the delta between the metric is within the
tolerance level.
"""
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
params = {
"booster": "gbtree",
"tree_method": "gpu_hist",
"gpu_id": 0,
}
num_trees = 100
check_metric_improvement_rounds = 10
evals_result: Dict[str, Dict] = {}
params["objective"] = rank_objective
params["eval_metric"] = metric_name
bst = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
gpu_scores = evals_result["train"][metric_name][-1]
evals_result = {}
cpu_params = {
"booster": "gbtree",
"tree_method": "hist",
"gpu_id": -1,
}
cpu_params["objective"] = rank_objective
cpu_params["eval_metric"] = metric_name
bstc = xgboost.train(
cpu_params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
cpu_scores = evals_result["train"][metric_name][-1]
info = (rank_objective, metric_name)
assert np.allclose(gpu_scores, cpu_scores, tolerance, tolerance), info
assert np.allclose(bst.best_score, bstc.best_score, tolerance, tolerance), info
evals_result_weighted: Dict[str, Dict] = {}
dtest.set_weight(np.ones((dtest.get_group().size,)))
dtrain.set_weight(np.ones((dtrain.get_group().size,)))
watchlist = [(dtest, "eval"), (dtrain, "train")]
bst_w = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result_weighted,
)
weighted_metric = evals_result_weighted["train"][metric_name][-1]
tolerance = 1e-5
assert np.allclose(bst_w.best_score, bst.best_score, tolerance, tolerance)
assert np.allclose(weighted_metric, gpu_scores, tolerance, tolerance)
@pytest.mark.parametrize(
"objective,metric",
[
("rank:pairwise", "auc"),
("rank:pairwise", "ndcg"),
("rank:pairwise", "map"),
("rank:ndcg", "auc"),
("rank:ndcg", "ndcg"),
("rank:ndcg", "map"),
("rank:map", "auc"),
("rank:map", "ndcg"),
("rank:map", "map"),
],
)
def test_with_mq2008(objective, metric) -> None:
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
    ) = tm.data.get_mq2008(os.path.join(tm.demo_dir(__file__), "rank"))
if metric.find("map") != -1 or objective.find("map") != -1:
y_train[y_train <= 1] = 0.0
y_train[y_train > 1] = 1.0
y_test[y_test <= 1] = 0.0
y_test[y_test > 1] = 1.0
dtrain = xgboost.DMatrix(x_train, y_train, qid=qid_train)
dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)
comp_training_with_rank_objective(dtrain, dtest, objective, metric)
|
from __future__ import annotations
import csv
import logging
import os
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
    it computes the Pearson & Spearman correlation between the predicted score for the sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: list[list[str]], scores: list[float], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info("Correlation:\tPearson: {:.4f}\tSpearman: {:.4f}".format(eval_pearson, eval_spearman))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
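# A hedged usage sketch (not part of the original module): build the evaluator
# from InputExample pairs and score a CrossEncoder. The model name and the
# tiny dataset below are illustrative only.
if __name__ == "__main__":
    from sentence_transformers import CrossEncoder
    examples = [
        InputExample(texts=["A man is eating food.", "A man eats something."], label=0.9),
        InputExample(texts=["A man is eating food.", "The sky is blue."], label=0.1),
        InputExample(texts=["A woman plays violin.", "Someone plays an instrument."], label=0.8),
    ]
    evaluator = CECorrelationEvaluator.from_input_examples(examples, name="sts-dev")
    model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
    spearman = evaluator(model, output_path=".")  # also appends a results CSV
    print(f"Spearman: {spearman:.4f}")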
|
import csv
import logging
import os
from typing import List
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
    it computes the Pearson & Spearman correlation between the predicted score for the sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: List[List[str]], scores: List[float], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info("Correlation:\tPearson: {:.4f}\tSpearman: {:.4f}".format(eval_pearson, eval_spearman))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
T = TypeVar('T', bound='Image')
try:
import torch
torch_available = True
except ImportError:
torch_available = False
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Optional
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, ImageUrl
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
"""Test CodeHierarchyNodeParser reading itself."""
from typing import Sequence
import pytest
from llama_index.core import SimpleDirectoryReader
from pytest import fixture
from llama_index.packs.code_hierarchy import CodeHierarchyNodeParser
from llama_index.core.text_splitter import CodeSplitter
from pathlib import Path
from llama_index.core.schema import BaseNode
import re
from llama_index.packs.code_hierarchy import CodeHierarchyKeywordQueryEngine
def print_python(python_text: str) -> None:
"""This function prints python text in ipynb nicely formatted."""
print("```python\n" + python_text + "```")
@fixture(params=[(80, 1000, 10), (500, 5000, 100)])
def code_hierarchy_nodes(request) -> Sequence[BaseNode]:
reader = SimpleDirectoryReader(
input_files=[
Path(__file__).parent
/ Path("../llama_index/packs/code_hierarchy/code_hierarchy.py")
],
file_metadata=lambda x: {"filepath": x},
)
nodes = reader.load_data()
return CodeHierarchyNodeParser(
language="python",
chunk_min_characters=request.param[0],
        # You can further parameterize the CodeSplitter to split the code
        # into "chunks" that match your context window size via the
        # chunk_lines and max_chars parameters; here they come from the fixture params
code_splitter=CodeSplitter(
language="python", max_chars=request.param[1], chunk_lines=request.param[2]
),
).get_nodes_from_documents(nodes)
def test_code_splitter_NEXT_relationship_indention(
code_hierarchy_nodes: Sequence[BaseNode],
) -> None:
"""When using jupyter I found that the final brevity comment was indented when it shouldn't be."""
for node in code_hierarchy_nodes:
last_line = node.text.split("\n")[-1]
if "Code replaced for brevity" in last_line and "NEXT" in node.relationships:
assert not last_line.startswith(" ")
assert not last_line.startswith("\t")
def test_query_by_module_name(code_hierarchy_nodes: Sequence[BaseNode]) -> None:
"""Test querying the index by filename."""
index = CodeHierarchyKeywordQueryEngine(nodes=code_hierarchy_nodes)
query = "code_hierarchy"
results = index.query(query)
assert len(results.response) >= 1 and results.response != "None"
@pytest.mark.parametrize(
"name",
[
"CodeHierarchyNodeParser",
"_parse_node",
"recur",
"__init__",
],
)
def test_query_by_item_name(
name: str, code_hierarchy_nodes: Sequence[BaseNode]
) -> None:
"""Test querying the index by signature."""
index = CodeHierarchyKeywordQueryEngine(nodes=code_hierarchy_nodes)
query = "CodeHierarchyNodeParser"
results = index.query(query)
assert len(results.response) >= 1 and results.response != "None"
def test_query_by_all_uuids(code_hierarchy_nodes: Sequence[BaseNode]) -> None:
"""Test querying the index by signature."""
index = CodeHierarchyKeywordQueryEngine(nodes=code_hierarchy_nodes)
for node in code_hierarchy_nodes:
# Find all uuids in the node
uuids = re.findall(r"[\w-]{36}", node.text)
for uuid in uuids:
results = index.query(uuid)
assert len(results.response) >= 1 and results.response != "None"
|
"""Test CodeHierarchyNodeParser reading itself."""
from typing import Sequence
import pytest
from llama_index.core import SimpleDirectoryReader
from pytest import fixture
from llama_index.packs.code_hierarchy import CodeHierarchyNodeParser
from llama_index.core.text_splitter import CodeSplitter
from pathlib import Path
from llama_index.core.schema import BaseNode
import re
from llama_index.packs.code_hierarchy import CodeHierarchyKeywordQueryEngine
def print_python(python_text: str) -> None:
"""This function prints python text in ipynb nicely formatted."""
print("```python\n" + python_text + "```")
@fixture(params=[(80, 1000, 10), (500, 5000, 100)])
def code_hierarchy_nodes(request) -> Sequence[BaseNode]:
reader = SimpleDirectoryReader(
input_files=[
Path(__file__).parent
/ Path("../llama_index/packs/code_hierarchy/code_hierarchy.py")
],
file_metadata=lambda x: {"filepath": x},
)
nodes = reader.load_data()
return CodeHierarchyNodeParser(
language="python",
chunk_min_characters=request.param[0],
        # You can further parameterize the CodeSplitter to split the code
        # into "chunks" that match your context window size via the
        # chunk_lines and max_chars parameters; here they come from the fixture params
code_splitter=CodeSplitter(
language="python", max_chars=request.param[1], chunk_lines=request.param[2]
),
).get_nodes_from_documents(nodes)
def test_code_splitter_NEXT_relationship_indention(
code_hierarchy_nodes: Sequence[BaseNode],
) -> None:
"""When using jupyter I found that the final brevity comment was indented when it shouldn't be."""
for node in code_hierarchy_nodes:
last_line = node.text.split("\n")[-1]
if "Code replaced for brevity" in last_line and "NEXT" in node.relationships:
assert not last_line.startswith(" ")
assert not last_line.startswith("\t")
def test_query_by_module_name(code_hierarchy_nodes: Sequence[BaseNode]) -> None:
"""Test querying the index by filename."""
index = CodeHierarchyKeywordQueryEngine(nodes=code_hierarchy_nodes)
query = "code_hierarchy"
results = index.query(query)
assert len(results.response) >= 1 and results.response != "None"
@pytest.mark.parametrize(
"name",
[
"CodeHierarchyNodeParser",
"_parse_node",
"recur",
"__init__",
],
)
def test_query_by_item_name(
name: str, code_hierarchy_nodes: Sequence[BaseNode]
) -> None:
"""Test querying the index by signature."""
index = CodeHierarchyKeywordQueryEngine(nodes=code_hierarchy_nodes)
query = "CodeHierarchyNodeParser"
results = index.query(query)
assert len(results.response) >= 1 and results.response != "None"
def test_query_by_all_uuids(code_hierarchy_nodes: Sequence[BaseNode]) -> None:
"""Test querying the index by signature."""
index = CodeHierarchyKeywordQueryEngine(nodes=code_hierarchy_nodes)
for node in code_hierarchy_nodes:
# Find all uuids in the node
uuids = re.findall(r"[\w-]{36}", node.text)
for uuid in uuids:
results = index.query(uuid)
assert len(results.response) >= 1 and results.response != "None"
|
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_to_bytes(protocol, compress, show_progress, array_cls):
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['protobuf'] # ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4']) # , 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False]) # [False, True])
@pytest.mark.parametrize('array_cls', [DocVec]) # [DocList, DocVec])
def test_from_to_base64(protocol, compress, show_progress, array_cls):
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
# test_from_to_base64('protobuf', 'lz4', False, DocVec)
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
@pytest.mark.parametrize('protocol', ['protobuf-array', 'pickle-array'])
def test_from_to_base64_tensor_type(tensor_type, protocol):
class MyDoc(BaseDoc):
embedding: tensor_type
text: str
image: ImageDoc
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
bytes_da = da.to_base64(protocol=protocol)
da2 = DocVec[MyDoc].from_base64(
bytes_da, tensor_type=tensor_type, protocol=protocol
)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_bytes_tensor_type(tensor_type):
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
bytes_da = da.to_bytes()
da2 = DocVec[MyDoc].from_bytes(bytes_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
def test_union_type_error(tmp_path):
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
docs.from_bytes(docs.to_bytes())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_bytes(docs_basic.to_bytes())
assert docs_copy == docs_basic
|
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_to_bytes(protocol, compress, show_progress, array_cls):
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_to_base64(protocol, compress, show_progress, array_cls):
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
@pytest.mark.parametrize('protocol', ['protobuf-array', 'pickle-array'])
def test_from_to_base64_tensor_type(tensor_type, protocol):
class MyDoc(BaseDoc):
embedding: tensor_type
text: str
image: ImageDoc
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
bytes_da = da.to_base64(protocol=protocol)
da2 = DocVec[MyDoc].from_base64(
bytes_da, tensor_type=tensor_type, protocol=protocol
)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_bytes_tensor_type(tensor_type):
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
bytes_da = da.to_bytes()
da2 = DocVec[MyDoc].from_bytes(bytes_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
def test_union_type_error(tmp_path):
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
docs.from_bytes(docs.to_bytes())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_bytes(docs_basic.to_bytes())
assert docs_copy == docs_basic
|
from .sentence_encoder import TransformerSentenceEncoder
|
from .sentence_encoder import TransformerSentenceEncoder
|
import numpy as np
from keras.src import backend
from keras.src import constraints
from keras.src import testing
def get_example_array():
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100.0 - 50.0
example_array[0, 0] = 0.0 # Possible edge case
return example_array
class ConstraintsTest(testing.TestCase):
def test_max_norm(self):
constraint_fn = constraints.MaxNorm(2.0)
x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
target = np.array(
[
[0, 0, 0],
[1.0, 0, 0],
[2.0, 0, 0],
[2.0 / np.sqrt(3), 2.0 / np.sqrt(3), 2.0 / np.sqrt(3)],
]
).T
output = constraint_fn(x)
self.assertAllClose(target, output)
def test_non_neg(self):
constraint_fn = constraints.NonNeg()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
self.assertTrue((np.min(output, axis=1) >= 0.0).all())
def test_unit_norm(self):
constraint_fn = constraints.UnitNorm()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
self.assertAllClose(l2, 1.0)
def test_min_max_norm(self):
constraint_fn = constraints.MinMaxNorm(min_value=0.2, max_value=0.5)
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
self.assertTrue(np.all(l2 >= 0.2))
self.assertTrue(np.all(l2 <= 0.5 + 1e-6))
def test_get_method(self):
obj = constraints.get("unit_norm")
        self.assertIsInstance(obj, constraints.UnitNorm)
obj = constraints.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
constraints.get("typo")
def test_default_constraint_call(self):
constraint_fn = constraints.Constraint()
x = np.array([1.0, 2.0, 3.0])
output = constraint_fn(x)
self.assertAllClose(x, output)
def test_constraint_get_config(self):
constraint_fn = constraints.Constraint()
config = constraint_fn.get_config()
self.assertEqual(config, {})
def test_constraint_from_config(self):
constraint_fn = constraints.Constraint()
config = constraint_fn.get_config()
recreated_constraint_fn = constraints.Constraint.from_config(config)
self.assertIsInstance(recreated_constraint_fn, constraints.Constraint)
def test_max_norm_get_config(self):
constraint_fn = constraints.MaxNorm(max_value=3.0, axis=1)
config = constraint_fn.get_config()
expected_config = {"max_value": 3.0, "axis": 1}
self.assertEqual(config, expected_config)
def test_unit_norm_get_config(self):
constraint_fn = constraints.UnitNorm(axis=1)
config = constraint_fn.get_config()
expected_config = {"axis": 1}
self.assertEqual(config, expected_config)
def test_min_max_norm_get_config(self):
constraint_fn = constraints.MinMaxNorm(
min_value=0.5, max_value=2.0, rate=0.7, axis=1
)
config = constraint_fn.get_config()
expected_config = {
"min_value": 0.5,
"max_value": 2.0,
"rate": 0.7,
"axis": 1,
}
self.assertEqual(config, expected_config)
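# A hedged usage sketch (not part of the original tests): attach a constraint
# to a layer through the public Keras API; the layer config is illustrative.
if __name__ == "__main__":
    from keras import layers
    dense = layers.Dense(4, kernel_constraint=constraints.MaxNorm(2.0))
    print(dense.kernel_constraint.get_config())  # {'max_value': 2.0, 'axis': 0}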
|
import numpy as np
from keras.src import backend
from keras.src import constraints
from keras.src import testing
def get_example_array():
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100.0 - 50.0
example_array[0, 0] = 0.0 # Possible edge case
return example_array
class ConstraintsTest(testing.TestCase):
def test_max_norm(self):
constraint_fn = constraints.MaxNorm(2.0)
x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
target = np.array(
[
[0, 0, 0],
[1.0, 0, 0],
[2.0, 0, 0],
[2.0 / np.sqrt(3), 2.0 / np.sqrt(3), 2.0 / np.sqrt(3)],
]
).T
output = constraint_fn(x)
self.assertAllClose(target, output)
def test_non_neg(self):
constraint_fn = constraints.NonNeg()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
self.assertTrue((np.min(output, axis=1) >= 0.0).all())
def test_unit_norm(self):
constraint_fn = constraints.UnitNorm()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
self.assertAllClose(l2, 1.0)
def test_min_max_norm(self):
constraint_fn = constraints.MinMaxNorm(min_value=0.2, max_value=0.5)
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
self.assertFalse(l2[l2 < 0.2])
self.assertFalse(l2[l2 > 0.5 + 1e-6])
def test_get_method(self):
obj = constraints.get("unit_norm")
        self.assertIsInstance(obj, constraints.UnitNorm)
obj = constraints.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
constraints.get("typo")
def test_default_constraint_call(self):
constraint_fn = constraints.Constraint()
x = np.array([1.0, 2.0, 3.0])
output = constraint_fn(x)
self.assertAllClose(x, output)
def test_constraint_get_config(self):
constraint_fn = constraints.Constraint()
config = constraint_fn.get_config()
self.assertEqual(config, {})
def test_constraint_from_config(self):
constraint_fn = constraints.Constraint()
config = constraint_fn.get_config()
recreated_constraint_fn = constraints.Constraint.from_config(config)
self.assertIsInstance(recreated_constraint_fn, constraints.Constraint)
def test_max_norm_get_config(self):
constraint_fn = constraints.MaxNorm(max_value=3.0, axis=1)
config = constraint_fn.get_config()
expected_config = {"max_value": 3.0, "axis": 1}
self.assertEqual(config, expected_config)
def test_unit_norm_get_config(self):
constraint_fn = constraints.UnitNorm(axis=1)
config = constraint_fn.get_config()
expected_config = {"axis": 1}
self.assertEqual(config, expected_config)
def test_min_max_norm_get_config(self):
constraint_fn = constraints.MinMaxNorm(
min_value=0.5, max_value=2.0, rate=0.7, axis=1
)
config = constraint_fn.get_config()
expected_config = {
"min_value": 0.5,
"max_value": 2.0,
"rate": 0.7,
"axis": 1,
}
self.assertEqual(config, expected_config)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from multiprocessing import Pool
import torch
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
    # math.pow(1024, 2) means 1 MB
assert_msg = f"Downloaded url '{url}' does not exist " \
f'or size is < min_bytes={min_bytes}'
try:
print(f'Downloading {url} to {out_file}...')
torch.hub.download_url_to_file(url, str(out_file), progress=progress)
assert osp.exists(
out_file) and osp.getsize(out_file) > min_bytes, assert_msg
except Exception as e:
if osp.exists(out_file):
os.remove(out_file)
print(f'ERROR: {e}\nRe-attempting {url} to {out_file} ...')
os.system(f"curl -L '{url}' -o '{out_file}' --retry 3 -C -"
) # curl download, retry and resume on fail
finally:
if osp.exists(out_file) and osp.getsize(out_file) < min_bytes:
os.remove(out_file) # remove partial downloads
if not osp.exists(out_file):
print(f'ERROR: {assert_msg}\n')
print('=========================================\n')
def parse_args():
parser = argparse.ArgumentParser(description='Download checkpoints')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'out', type=str, help='output dir of checkpoints to be stored')
parser.add_argument(
        '--nproc', type=int, default=16, help='number of processes')
parser.add_argument(
'--intranet',
action='store_true',
help='switch to internal network url')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
mkdir_or_exist(args.out)
cfg = Config.fromfile(args.config)
checkpoint_url_list = []
checkpoint_out_list = []
for model in cfg:
model_infos = cfg[model]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
checkpoint = model_info['checkpoint']
out_file = osp.join(args.out, checkpoint)
if not osp.exists(out_file):
url = model_info['url']
if args.intranet is True:
url = url.replace('.com', '.sensetime.com')
url = url.replace('https', 'http')
checkpoint_url_list.append(url)
checkpoint_out_list.append(out_file)
if len(checkpoint_url_list) > 0:
pool = Pool(min(os.cpu_count(), args.nproc))
pool.starmap(download, zip(checkpoint_url_list, checkpoint_out_list))
else:
print('No files to download!')
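# --- Hedged usage sketch (not part of the script), assuming it is saved as
# download_checkpoints.py (hypothetical name) and the config maps model names
# to dicts carrying 'checkpoint' and 'url' keys:
#
#   python download_checkpoints.py configs/benchmark.py ./checkpoints --nproc 8
#
# With --intranet, the URLs are rewritten to the internal http mirror before
# downloading.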
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from multiprocessing import Pool
import mmcv
import torch
from mmcv import Config
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
    # math.pow(1024, 2) means 1 MB
assert_msg = f"Downloaded url '{url}' does not exist " \
f'or size is < min_bytes={min_bytes}'
try:
print(f'Downloading {url} to {out_file}...')
torch.hub.download_url_to_file(url, str(out_file), progress=progress)
assert osp.exists(
out_file) and osp.getsize(out_file) > min_bytes, assert_msg
except Exception as e:
if osp.exists(out_file):
os.remove(out_file)
print(f'ERROR: {e}\nRe-attempting {url} to {out_file} ...')
os.system(f"curl -L '{url}' -o '{out_file}' --retry 3 -C -"
) # curl download, retry and resume on fail
finally:
if osp.exists(out_file) and osp.getsize(out_file) < min_bytes:
os.remove(out_file) # remove partial downloads
if not osp.exists(out_file):
print(f'ERROR: {assert_msg}\n')
print('=========================================\n')
def parse_args():
parser = argparse.ArgumentParser(description='Download checkpoints')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'out', type=str, help='output dir of checkpoints to be stored')
parser.add_argument(
        '--nproc', type=int, default=16, help='number of processes')
parser.add_argument(
'--intranet',
action='store_true',
help='switch to internal network url')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
mmcv.mkdir_or_exist(args.out)
cfg = Config.fromfile(args.config)
checkpoint_url_list = []
checkpoint_out_list = []
for model in cfg:
model_infos = cfg[model]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
checkpoint = model_info['checkpoint']
out_file = osp.join(args.out, checkpoint)
if not osp.exists(out_file):
url = model_info['url']
if args.intranet is True:
url = url.replace('.com', '.sensetime.com')
url = url.replace('https', 'http')
checkpoint_url_list.append(url)
checkpoint_out_list.append(out_file)
if len(checkpoint_url_list) > 0:
pool = Pool(min(os.cpu_count(), args.nproc))
pool.starmap(download, zip(checkpoint_url_list, checkpoint_out_list))
else:
print('No files to download!')
|
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
from collections import defaultdict
from typing import TYPE_CHECKING, Optional
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
from docarray.proto.io.ndarray import flush_ndarray, read_ndarray
from docarray.proto.docarray_pb2 import NdArrayProto, DocumentProto
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def parse_proto(pb_msg: 'DocumentProto') -> 'Document':
from docarray import Document
from docarray.score import NamedScore
fields = {}
for (field, value) in pb_msg.ListFields():
f_name = field.name
if f_name == 'chunks' or f_name == 'matches':
fields[f_name] = [Document.from_protobuf(d) for d in value]
elif isinstance(value, NdArrayProto):
fields[f_name] = read_ndarray(value)
elif isinstance(value, Struct):
fields[f_name] = MessageToDict(value, preserving_proto_field_name=True)
elif f_name == 'location':
fields[f_name] = list(value)
elif f_name == 'scores' or f_name == 'evaluations':
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(
{ff.name: vv for (ff, vv) in v.ListFields()}
)
else:
fields[f_name] = value
return Document(**fields)
def flush_proto(doc: 'Document', ndarray_type: Optional[str] = None) -> 'DocumentProto':
pb_msg = DocumentProto()
for key in doc.non_empty_fields:
try:
value = getattr(doc, key)
if key in ('tensor', 'embedding'):
flush_ndarray(getattr(pb_msg, key), value, ndarray_type=ndarray_type)
elif key in ('chunks', 'matches'):
for d in value:
d: Document
docs = getattr(pb_msg, key)
docs.append(d.to_protobuf())
elif key == 'tags':
pb_msg.tags.update(value)
elif key == '_metadata':
pb_msg._metadata.update(value)
elif key in ('scores', 'evaluations'):
for kk, vv in value.items():
for ff in vv.non_empty_fields:
setattr(getattr(pb_msg, key)[kk], ff, getattr(vv, ff))
elif key == 'location':
pb_msg.location.extend(value)
elif key == 'content':
pass # intentionally ignore `content` field as it is just a proxy
else:
# other simple fields
setattr(pb_msg, key, value)
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
f'Field `{key}` contains cyclic reference in memory. '
f'Could it be your Document is referring to itself?',
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{key}` is problematic',) + ex.args
raise
return pb_msg
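# --- Minimal sketch (illustration only) of the exception-rewrapping pattern used
# in flush_proto above: prepending context to ex.args and re-raising preserves
# the original traceback while telling the caller which field was being
# serialized. The demo names are hypothetical.
def _demo_rewrap():
    try:
        raise TypeError('int expected')
    except Exception as ex:
        if len(ex.args) >= 1:
            ex.args = ('Field `tags` is problematic',) + ex.args
        raise

try:
    _demo_rewrap()
except TypeError as e:
    assert e.args == ('Field `tags` is problematic', 'int expected')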
|
from collections import defaultdict
from typing import TYPE_CHECKING, Optional
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
from docarray.proto.io.ndarray import flush_ndarray, read_ndarray
from docarray.proto.docarray_pb2 import NdArrayProto, DocumentProto
if TYPE_CHECKING:
from docarray import Document
def parse_proto(pb_msg: 'DocumentProto') -> 'Document':
from docarray import Document
from docarray.score import NamedScore
fields = {}
for (field, value) in pb_msg.ListFields():
f_name = field.name
if f_name == 'chunks' or f_name == 'matches':
fields[f_name] = [Document.from_protobuf(d) for d in value]
elif isinstance(value, NdArrayProto):
fields[f_name] = read_ndarray(value)
elif isinstance(value, Struct):
fields[f_name] = MessageToDict(value, preserving_proto_field_name=True)
elif f_name == 'location':
fields[f_name] = list(value)
elif f_name == 'scores' or f_name == 'evaluations':
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(
{ff.name: vv for (ff, vv) in v.ListFields()}
)
else:
fields[f_name] = value
return Document(**fields)
def flush_proto(doc: 'Document', ndarray_type: Optional[str] = None) -> 'DocumentProto':
pb_msg = DocumentProto()
for key in doc.non_empty_fields:
try:
value = getattr(doc, key)
if key in ('tensor', 'embedding'):
flush_ndarray(getattr(pb_msg, key), value, ndarray_type=ndarray_type)
elif key in ('chunks', 'matches'):
for d in value:
d: Document
docs = getattr(pb_msg, key)
docs.append(d.to_protobuf())
elif key == 'tags':
pb_msg.tags.update(value)
elif key == '_metadata':
pb_msg._metadata.update(value)
elif key in ('scores', 'evaluations'):
for kk, vv in value.items():
for ff in vv.non_empty_fields:
setattr(getattr(pb_msg, key)[kk], ff, getattr(vv, ff))
elif key == 'location':
pb_msg.location.extend(value)
elif key == 'content':
pass # intentionally ignore `content` field as it is just a proxy
else:
# other simple fields
setattr(pb_msg, key, value)
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
f'Field `{key}` contains cyclic reference in memory. '
f'Could it be your Document is referring to itself?',
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{key}` is problematic',) + ex.args
raise
return pb_msg
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
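# --- Illustrative note (not part of the config): with the schedulers above, the
# effective LR multiplier is 1/3 for the first 500 iterations (ConstantLR
# warmup), then 1.0 until epoch 16, 0.1 from epoch 16, and 0.01 from epoch 22
# through the end of the 24-epoch ("2x") schedule.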
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
            An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
                self.pooling_func(X[:, self.labels_ == label], axis=1)
                for label in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X_original : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `X` assigned to
            each cluster of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
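# --- Minimal sketch (illustration only) of the grouped-mean trick used in
# transform() above: np.bincount with weights sums one row's entries per
# cluster label, and dividing by the per-label counts yields cluster means.
import numpy as np

_labels = np.array([0, 0, 1, 2, 1])
_row = np.array([1.0, 3.0, 2.0, 5.0, 4.0])
_sums = np.bincount(_labels, weights=_row)   # array([4., 6., 5.])
_counts = np.bincount(_labels)               # array([2, 2, 1])
assert np.allclose(_sums / _counts, [2.0, 3.0, 5.0])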
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
            An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
                self.pooling_func(X[:, self.labels_ == label], axis=1)
                for label in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
            A vector of size `n_samples` with the values of `X` assigned to
            each cluster of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Dict, Iterable
from jina import DocumentArray, Executor, requests
class SimpleRanker(Executor):
"""
:class:`SimpleRanker` aggregates the score of the matched doc from the
matched chunks. For each matched doc, the score is aggregated from all the
matched chunks belonging to that doc. The score of the document is the minimum
score (min distance) among the chunks. The aggregated matches are sorted by
score (ascending).
"""
def __init__(
self,
metric: str = 'cosine',
ranking: str = 'min',
traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs
):
"""
:param metric: the distance metric used in `scores`
        :param ranking: The ranking function that the executor uses. There are multiple
options:
- min: Select minimum score/distance and sort by minimum
- max: Select maximum score/distance and sort by maximum
- mean_min: Calculate mean score/distance and sort by minimum mean
- mean_max: Calculate mean score/distance and sort by maximum mean
        :param traversal_paths: traversal paths to apply to the docs, e.g. ['r'], ['c']
"""
super().__init__(*args, **kwargs)
self.metric = metric
assert ranking in ['min', 'max', 'mean_min', 'mean_max']
self.ranking = ranking
self.traversal_paths = traversal_paths
@requests(on='/search')
def rank(self, docs: DocumentArray, parameters: Dict, *args, **kwargs):
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
for doc in docs.traverse_flat(traversal_paths):
matches_of_chunks = []
for chunk in doc.chunks:
matches_of_chunks.extend(chunk.matches)
groups = groupby(
sorted(matches_of_chunks, key=lambda d: d.parent_id),
lambda d: d.parent_id,
)
for key, group in groups:
chunk_match_list = list(group)
if self.ranking == 'min':
chunk_match_list.sort(key=lambda m: m.scores[self.metric].value)
elif self.ranking == 'max':
chunk_match_list.sort(key=lambda m: -m.scores[self.metric].value)
match = chunk_match_list[0]
match.id = chunk_match_list[0].parent_id
if self.ranking in ['mean_min', 'mean_max']:
scores = [el.scores[self.metric].value for el in chunk_match_list]
match.scores[self.metric] = sum(scores) / len(scores)
doc.matches.append(match)
if self.ranking in ['min', 'mean_min']:
doc.matches.sort(key=lambda d: d.scores[self.metric].value)
elif self.ranking in ['max', 'mean_max']:
doc.matches.sort(key=lambda d: -d.scores[self.metric].value)
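# --- Illustrative sketch (not part of the executor): itertools.groupby only
# groups *consecutive* equal keys, which is why rank() sorts the chunk matches
# by parent_id before grouping. A standalone demonstration with dummy tuples:
from itertools import groupby

_matches = [('docB', 0.9), ('docA', 0.2), ('docB', 0.1), ('docA', 0.5)]
_best = {
    pid: min(score for _, score in grp)
    for pid, grp in groupby(sorted(_matches), key=lambda m: m[0])
}
assert _best == {'docA': 0.2, 'docB': 0.1}  # min distance per parent doc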
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Dict, Iterable
from jina import DocumentArray, Executor, requests
class SimpleRanker(Executor):
"""
:class:`SimpleRanker` aggregates the score of the matched doc from the
matched chunks. For each matched doc, the score is aggregated from all the
matched chunks belonging to that doc. The score of the document is the minimum
score (min distance) among the chunks. The aggregated matches are sorted by
score (ascending).
"""
def __init__(
self,
metric: str = 'cosine',
ranking: str = 'min',
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs
):
"""
:param metric: the distance metric used in `scores`
        :param ranking: The ranking function that the executor uses. There are multiple
options:
- min: Select minimum score/distance and sort by minimum
- max: Select maximum score/distance and sort by maximum
- mean_min: Calculate mean score/distance and sort by minimum mean
- mean_max: Calculate mean score/distance and sort by maximum mean
        :param default_traversal_paths: traversal paths to apply to the docs, e.g. ['r'], ['c']
"""
super().__init__(*args, **kwargs)
self.metric = metric
assert ranking in ['min', 'max', 'mean_min', 'mean_max']
self.ranking = ranking
self.default_traversal_paths = default_traversal_paths
@requests(on='/search')
def rank(self, docs: DocumentArray, parameters: Dict, *args, **kwargs):
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
matches_of_chunks = []
for chunk in doc.chunks:
matches_of_chunks.extend(chunk.matches)
groups = groupby(
sorted(matches_of_chunks, key=lambda d: d.parent_id),
lambda d: d.parent_id,
)
for key, group in groups:
chunk_match_list = list(group)
if self.ranking == 'min':
chunk_match_list.sort(key=lambda m: m.scores[self.metric].value)
elif self.ranking == 'max':
chunk_match_list.sort(key=lambda m: -m.scores[self.metric].value)
match = chunk_match_list[0]
match.id = chunk_match_list[0].parent_id
if self.ranking in ['mean_min', 'mean_max']:
scores = [el.scores[self.metric].value for el in chunk_match_list]
match.scores[self.metric] = sum(scores) / len(scores)
doc.matches.append(match)
if self.ranking in ['min', 'mean_min']:
doc.matches.sort(key=lambda d: d.scores[self.metric].value)
elif self.ranking in ['max', 'mean_max']:
doc.matches.sort(key=lambda d: -d.scores[self.metric].value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
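# --- Hedged usage sketch (not part of the hook): with mmengine's registry, the
# hook is typically enabled from a config entry along these lines; the exact
# runner wiring is assumed here:
#
#   custom_hooks = [
#       dict(type='EmptyCacheHook', before_epoch=False,
#            after_epoch=True, after_iter=False)
#   ]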
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import get_uncertain_point_coords_with_randomness
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU'
]
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
    class MySuperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = doc.to_protobuf()
doc = MyDoc.from_protobuf(doc)
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
    class MySuperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.audio import OpenAIWhisperParser
from langchain_community.document_loaders.parsers.docai import DocAIParser
from langchain_community.document_loaders.parsers.grobid import GrobidParser
from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser
from langchain_community.document_loaders.parsers.language.language_parser import (
LanguageParser,
)
from langchain_community.document_loaders.parsers.pdf import (
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BS4HTMLParser": "langchain_community.document_loaders.parsers.html.bs4",
"DocAIParser": "langchain_community.document_loaders.parsers.docai",
"GrobidParser": "langchain_community.document_loaders.parsers.grobid",
"LanguageParser": (
"langchain_community.document_loaders.parsers.language.language_parser"
),
"OpenAIWhisperParser": "langchain_community.document_loaders.parsers.audio",
"PDFMinerParser": "langchain_community.document_loaders.parsers.pdf",
"PDFPlumberParser": "langchain_community.document_loaders.parsers.pdf",
"PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFParser": "langchain_community.document_loaders.parsers.pdf",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BS4HTMLParser",
"DocAIParser",
"GrobidParser",
"LanguageParser",
"OpenAIWhisperParser",
"PDFMinerParser",
"PDFPlumberParser",
"PyMuPDFParser",
"PyPDFParser",
"PyPDFium2Parser",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.audio import OpenAIWhisperParser
from langchain_community.document_loaders.parsers.docai import DocAIParser
from langchain_community.document_loaders.parsers.grobid import GrobidParser
from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser
from langchain_community.document_loaders.parsers.language.language_parser import (
LanguageParser,
)
from langchain_community.document_loaders.parsers.pdf import (
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BS4HTMLParser": "langchain_community.document_loaders.parsers.html.bs4",
"DocAIParser": "langchain_community.document_loaders.parsers.docai",
"GrobidParser": "langchain_community.document_loaders.parsers.grobid",
"LanguageParser": (
"langchain_community.document_loaders.parsers.language.language_parser"
),
"OpenAIWhisperParser": "langchain_community.document_loaders.parsers.audio",
"PDFMinerParser": "langchain_community.document_loaders.parsers.pdf",
"PDFPlumberParser": "langchain_community.document_loaders.parsers.pdf",
"PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFParser": "langchain_community.document_loaders.parsers.pdf",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BS4HTMLParser",
"DocAIParser",
"GrobidParser",
"LanguageParser",
"OpenAIWhisperParser",
"PDFMinerParser",
"PDFPlumberParser",
"PyMuPDFParser",
"PyPDFium2Parser",
"PyPDFParser",
]
|
"""Weaviate Retry query engine pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.evaluation.guideline import DEFAULT_GUIDELINES, GuidelineEvaluator
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine.retry_query_engine import (
RetryGuidelineQueryEngine,
)
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.vector_stores.types import VectorStoreInfo
from llama_index.vector_stores.weaviate import WeaviateVectorStore
class WeaviateRetryEnginePack(BaseLlamaPack):
"""Weaviate Retry query engine pack."""
def __init__(
self,
collection_name: str,
vector_store_info: VectorStoreInfo,
host: str,
auth_client_secret: str,
nodes: Optional[List[TextNode]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
from weaviate import Client
self.client: Client = Client(host, auth_client_secret=auth_client_secret)
weaviate_client = self.client
weaviate_collection = weaviate_client.get_or_create_collection(collection_name)
self._vector_store = WeaviateVectorStore(
weaviate_collection=weaviate_collection
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = self._index.as_retriever()
base_query_engine = self._index.as_query_engine()
guideline_eval = GuidelineEvaluator(guidelines=DEFAULT_GUIDELINES)
self.query_engine = RetryGuidelineQueryEngine(
base_query_engine, guideline_eval, resynthesize_query=True
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
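# --- Hedged usage sketch (not part of the pack): instantiating and querying,
# assuming a reachable Weaviate host; the host URL and secret are hypothetical:
#
#   pack = WeaviateRetryEnginePack(
#       collection_name='LlamaIndexDocs',
#       vector_store_info=vector_store_info,
#       host='http://localhost:8080',
#       auth_client_secret=auth_secret,
#       nodes=nodes,
#   )
#   response = pack.run('What does the retry engine do on a failed guideline?')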
|
"""Weaviate Retry query engine pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.evaluation.guideline import DEFAULT_GUIDELINES, GuidelineEvaluator
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine.retry_query_engine import (
RetryGuidelineQueryEngine,
)
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.vector_stores.types import VectorStoreInfo
from llama_index.vector_stores.weaviate import WeaviateVectorStore
class WeaviateRetryEnginePack(BaseLlamaPack):
"""Weaviate Retry query engine pack."""
def __init__(
self,
collection_name: str,
vector_store_info: VectorStoreInfo,
host: str,
auth_client_secret: str,
nodes: Optional[List[TextNode]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
from weaviate import Client
self.client: Client = Client(host, auth_client_secret=auth_client_secret)
weaviate_client = self.client
weaviate_collection = weaviate_client.get_or_create_collection(collection_name)
self._vector_store = WeaviateVectorStore(
weaviate_collection=weaviate_collection
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = self._index.as_retriever()
base_query_engine = self._index.as_query_engine()
guideline_eval = GuidelineEvaluator(guidelines=DEFAULT_GUIDELINES)
self.query_engine = RetryGuidelineQueryEngine(
base_query_engine, guideline_eval, resynthesize_query=True
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
def create_openai_tools_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
strict: Optional[bool] = None, # noqa: FBT001
) -> Runnable:
"""Create an agent that uses OpenAI tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
Here's an example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
MessagesPlaceholder("chat_history", optional=True),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
]
)
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables),
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
llm_with_tools = llm.bind(
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools],
)
return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"],
),
)
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
|
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
def create_openai_tools_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
strict: Optional[bool] = None, # noqa: FBT001
) -> Runnable:
"""Create an agent that uses OpenAI tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
Here's an example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
MessagesPlaceholder("chat_history", optional=True),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
]
)
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
llm_with_tools = llm.bind(
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
)
return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
)
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return __all__
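# --- Minimal sketch (illustration only) of the PEP 562 lazy-import pattern used
# above, stripped of the parser-specific lookup table. It is renamed so it does
# not shadow the real module __getattr__: resolve an attribute on first access,
# then cache it in globals() so later lookups bypass the hook entirely.
from importlib import import_module

def _demo_module_getattr(name, _lazy={'sqrt': 'math'}):
    if name in _lazy:
        value = getattr(import_module(_lazy[name]), name)
        globals()[name] = value  # cached: subsequent access skips __getattr__
        return value
    raise AttributeError(name)

assert _demo_module_getattr('sqrt')(9) == 3.0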
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
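# Self-contained sketch of the PEP 562 lazy-import pattern used above
# (illustrative only; the "demo_lazy" module below is an assumption, not part
# of langchain_core). First attribute access triggers the import, and the
# result is cached in the module namespace so __getattr__ is not called again.
import sys
import types

_demo = types.ModuleType("demo_lazy")

def _lazy_getattr(attr_name: str) -> object:
    import math  # stands in for the real submodule import
    result = getattr(math, attr_name)
    setattr(_demo, attr_name, result)  # cache for subsequent lookups
    return result

_demo.__getattr__ = _lazy_getattr
sys.modules["demo_lazy"] = _demo

from demo_lazy import sqrt  # resolved via _lazy_getattr on first access
assert sqrt(9) == 3.0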
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_cosmos2_text2image"] = ["Cosmos2TextToImagePipeline"]
_import_structure["pipeline_cosmos2_video2world"] = ["Cosmos2VideoToWorldPipeline"]
_import_structure["pipeline_cosmos_text2world"] = ["CosmosTextToWorldPipeline"]
_import_structure["pipeline_cosmos_video2world"] = ["CosmosVideoToWorldPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_cosmos2_text2image import Cosmos2TextToImagePipeline
from .pipeline_cosmos2_video2world import Cosmos2VideoToWorldPipeline
from .pipeline_cosmos_text2world import CosmosTextToWorldPipeline
from .pipeline_cosmos_video2world import CosmosVideoToWorldPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_cosmos_text2world"] = ["CosmosTextToWorldPipeline"]
_import_structure["pipeline_cosmos_video2world"] = ["CosmosVideoToWorldPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_cosmos_text2world import CosmosTextToWorldPipeline
from .pipeline_cosmos_video2world import CosmosVideoToWorldPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
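# Generic, hedged sketch of the optional-dependency guard above (not diffusers
# code): probe for the heavy dependency, then register either the real import
# structure or a dummy placeholder that raises on instantiation.
import importlib.util

_sketch_import_structure = {}
_sketch_dummy_objects = {}

if importlib.util.find_spec("torch") is not None:
    _sketch_import_structure["real_pipeline"] = ["RealPipeline"]
else:
    class RealPipeline:  # mirrors the role of diffusers' dummy objects
        def __init__(self, *args, **kwargs):
            raise ImportError("RealPipeline requires torch to be installed")

    _sketch_dummy_objects["RealPipeline"] = RealPipeline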
|
import requests
import pytest
import os
from llama_index.core.readers.base import BaseReader
from llama_index.readers.whisper import WhisperReader
from io import BytesIO
AUDIO_URL = "https://science.nasa.gov/wp-content/uploads/2024/04/sounds-of-mars-one-small-step-earth.wav"
AUDIO_URL = "https://audio-samples.github.io/samples/mp3/blizzard_primed/sample-0.mp3"
OPENAI_AVAILABLE = os.getenv("OPENAI_API_KEY") is not None
def test_class():
names_of_base_classes = [b.__name__ for b in WhisperReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
def test_get_file_or_bytes():
reader = WhisperReader(model="whisper-1", api_key="test")
audio_bytes = requests.get(AUDIO_URL).content
file_path_or_bytes = reader._get_file_path_or_bytes(audio_bytes)
assert isinstance(file_path_or_bytes, BytesIO)
def test_get_file_or_bytes_file():
reader = WhisperReader(model="whisper-1", api_key="test")
audio_bytes = requests.get(AUDIO_URL).content
# Create a temporary file-like object with a name
audio_file = BytesIO(audio_bytes)
audio_file.name = "audio.mp3"
file_path_or_bytes = reader._get_file_path_or_bytes(audio_file)
assert isinstance(file_path_or_bytes, BytesIO)
@pytest.mark.skipif(not OPENAI_AVAILABLE, reason="OpenAI API key not available")
def test_load_data_bytes():
reader = WhisperReader(model="whisper-1")
audio_bytes = requests.get(AUDIO_URL).content
audio_file = BytesIO(audio_bytes)
audio_file.name = "audio.mp3"
documents = reader.load_data(audio_file)
assert len(documents) == 1
assert documents[0].text is not None
assert documents[0].metadata is not None
@pytest.mark.skipif(not OPENAI_AVAILABLE, reason="OpenAI API key not available")
def test_load_data_file():
reader = WhisperReader(model="whisper-1")
audio_file = requests.get(AUDIO_URL)
with open("test_audio.mp3", "wb") as f:
f.write(audio_file.content)
documents = reader.load_data("test_audio.mp3")
assert len(documents) == 1
assert documents[0].text is not None
assert documents[0].metadata is not None
@pytest.mark.skipif(not OPENAI_AVAILABLE, reason="OpenAI API key not available")
@pytest.mark.asyncio
async def test_load_data_async_bytes():
reader = WhisperReader(model="whisper-1")
audio_bytes = requests.get(AUDIO_URL).content
documents = await reader.aload_data(audio_bytes)
assert len(documents) == 1
assert documents[0].text is not None
assert documents[0].metadata is not None
|
import requests
import pytest
import os
from llama_index.core.readers.base import BaseReader
from llama_index.readers.whisper import WhisperReader
from io import BytesIO
AUDIO_URL = "https://science.nasa.gov/wp-content/uploads/2024/04/sounds-of-mars-one-small-step-earth.wav"
AUDIO_URL = "https://audio-samples.github.io/samples/mp3/blizzard_primed/sample-0.mp3"
OPENAI_AVAILABLE = os.getenv("OPENAI_API_KEY") is not None
def test_class():
names_of_base_classes = [b.__name__ for b in WhisperReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
def test_get_file_or_bytes():
reader = WhisperReader(model="whisper-1", api_key="test")
audio_bytes = requests.get(AUDIO_URL).content
file_path_or_bytes = reader._get_file_path_or_bytes(audio_bytes)
assert isinstance(file_path_or_bytes, BytesIO)
def test_get_file_or_bytes_file():
reader = WhisperReader(model="whisper-1", api_key="test")
audio_bytes = requests.get(AUDIO_URL).content
# Create a temporary file-like object with a name
audio_file = BytesIO(audio_bytes)
audio_file.name = "audio.mp3"
file_path_or_bytes = reader._get_file_path_or_bytes(audio_file)
assert isinstance(file_path_or_bytes, BytesIO)
@pytest.mark.skipif(not OPENAI_AVAILABLE, reason="OpenAI API key not available")
def test_load_data_bytes():
reader = WhisperReader(model="whisper-1")
audio_bytes = requests.get(AUDIO_URL).content
audio_file = BytesIO(audio_bytes)
audio_file.name = "audio.mp3"
documents = reader.load_data(audio_file)
assert len(documents) == 1
assert documents[0].text is not None
assert documents[0].metadata is not None
@pytest.mark.skipif(not OPENAI_AVAILABLE, reason="OpenAI API key not available")
def test_load_data_file():
reader = WhisperReader(model="whisper-1")
audio_file = requests.get(AUDIO_URL)
with open("test_audio.mp3", "wb") as f:
f.write(audio_file.content)
documents = reader.load_data("test_audio.mp3")
assert len(documents) == 1
assert documents[0].text is not None
assert documents[0].metadata is not None
@pytest.mark.skipif(not OPENAI_AVAILABLE, reason="OpenAI API key not available")
@pytest.mark.asyncio()
async def test_load_data_async_bytes():
reader = WhisperReader(model="whisper-1")
audio_bytes = requests.get(AUDIO_URL).content
documents = await reader.aload_data(audio_bytes)
assert len(documents) == 1
assert documents[0].text is not None
assert documents[0].metadata is not None
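# Note: @pytest.mark.asyncio above requires the pytest-asyncio plugin to be
# installed. A hedged invocation sketch (file path is an assumption):
#   OPENAI_API_KEY=sk-... pytest tests/test_readers_whisper.py -q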
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocList[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocList[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocList) -> DocList:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocList[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
docs=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocList[MyImage])
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocArray[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocArray[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocArray) -> DocArray:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocArray[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
da=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocArray[MyImage])
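# Hedged usage sketch of map_docs (newer docarray API with DocList and the
# `docs=` keyword, as in the first snippet above; assumes docarray installed).
# The 'process' backend needs a picklable module-level function, which is why
# the lambda/local-function tests expect a ValueError.
from docarray import BaseDoc, DocList
from docarray.utils.map import map_docs

class TextDoc(BaseDoc):
    text: str = ''

def to_upper(doc: TextDoc) -> TextDoc:
    doc.text = doc.text.upper()
    return doc

docs = DocList[TextDoc]([TextDoc(text='hello') for _ in range(3)])
assert [d.text for d in map_docs(docs=docs, func=to_upper, backend='thread')] == ['HELLO'] * 3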
|
from jina import Flow, Executor, requests
import pytest
class GoodExecutor(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self, **kwargs):
pass
class GoodExecutor2(Executor):
def __init__(self, metas, requests, runtime_args, dynamic_batching):
pass
@requests
def foo(self, docs, parameters, docs_matrix):
pass
def test_bad_executor_constructor():
# an executor can be used outside of a Flow as a plain Python object
exec1 = GoodExecutor()
exec2 = GoodExecutor2({}, {}, {}, {})
# can be used in the Flow
with Flow().add(uses=GoodExecutor):
pass
with Flow().add(uses=GoodExecutor2):
pass
# bad executor due to mismatch on args
with pytest.raises(TypeError):
class BadExecutor1(Executor):
def __init__(self):
pass
@requests
def foo(self, **kwargs):
pass
with pytest.raises(TypeError):
class BadExecutor2(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self):
pass
|
from jina import Flow, Executor, requests
import pytest
class GoodExecutor(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self, **kwargs):
pass
class GoodExecutor2(Executor):
def __init__(self, metas, requests, runtime_args):
pass
@requests
def foo(self, docs, parameters, docs_matrix):
pass
def test_bad_executor_constructor():
# an executor can be used outside of a Flow as a plain Python object
exec1 = GoodExecutor()
exec2 = GoodExecutor2({}, {}, {})
# can be used in the Flow
with Flow().add(uses=GoodExecutor):
pass
with Flow().add(uses=GoodExecutor2):
pass
# bad executor due to mismatch on args
with pytest.raises(TypeError):
class BadExecutor1(Executor):
def __init__(self):
pass
@requests
def foo(self, **kwargs):
pass
with pytest.raises(TypeError):
class BadExecutor2(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self):
pass
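# Why the TypeError is expected above: when constructing an Executor, Jina
# injects reserved arguments (e.g. metas, requests, runtime_args), so a
# subclass __init__ must either accept **kwargs and forward them to
# super().__init__, or declare those parameters explicitly; likewise, a
# @requests method must accept the injected call arguments (e.g. via **kwargs).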
|
"""Meta-estimators for building composite models with transformers.
In addition to its current contents, this module will eventually be home to
refurbished versions of :class:`~sklearn.pipeline.Pipeline` and
:class:`~sklearn.pipeline.FeatureUnion`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._column_transformer import (
ColumnTransformer,
make_column_selector,
make_column_transformer,
)
from ._target import TransformedTargetRegressor
__all__ = [
"ColumnTransformer",
"TransformedTargetRegressor",
"make_column_selector",
"make_column_transformer",
]
|
"""Meta-estimators for building composite models with transformers.
In addition to its current contents, this module will eventually be home to
refurbished versions of :class:`~sklearn.pipeline.Pipeline` and
:class:`~sklearn.pipeline.FeatureUnion`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._column_transformer import (
ColumnTransformer,
make_column_selector,
make_column_transformer,
)
from ._target import TransformedTargetRegressor
__all__ = [
"ColumnTransformer",
"make_column_transformer",
"TransformedTargetRegressor",
"make_column_selector",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .ListNetLoss import ListNetLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
"ListNetLoss",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GetElementsTool
from langchain_community.tools.playwright.get_elements import GetElementsToolInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GetElementsToolInput": "langchain_community.tools.playwright.get_elements",
"GetElementsTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GetElementsTool",
"GetElementsToolInput",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GetElementsTool
from langchain_community.tools.playwright.get_elements import GetElementsToolInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GetElementsToolInput": "langchain_community.tools.playwright.get_elements",
"GetElementsTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GetElementsToolInput",
"GetElementsTool",
]
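# Hedged behavior sketch: accessing a deprecated name on this module routes
# through create_importer, which emits a deprecation warning and re-exports
# the object from its new langchain_community location, e.g. (assuming this
# module still lives at its legacy langchain path):
# from langchain.tools.playwright.get_elements import GetElementsTool  # warns,
# then returns langchain_community.tools.GetElementsTool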
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast; clustering 50k sentences requires only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
import csv
import os
import time
from sentence_transformers import SentenceTransformer, util
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar question in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_cluster_size: Only consider cluster that have at least 25 elements
# threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast; clustering 50k sentences requires only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import time
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar question in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_cluster_size: Only consider cluster that have at least 25 elements
# threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by spaCy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
:param download_data: whether to download the spaCy model data when the encoder is initialized
:param traversal_paths: fallback traversal path in case no traversal path is sent in the request
:param batch_size: fallback batch size in case no batch size is sent in the request
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_paths`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=batch_size,
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by spaCy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
:param download_data: whether to download the spaCy model data when the encoder is initialized
:param traversal_paths: fallback traversal path in case no traversal path is sent in the request
:param batch_size: fallback batch size in case no batch size is sent in the request
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_paths`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=batch_size,
require_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
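# Hedged usage sketch (assumes jina with DocumentArray and a locally available
# spaCy model, hence download_data=False; names are illustrative):
# from jina import Document, DocumentArray
# encoder = SpacyTextEncoder(download_data=False)
# docs = DocumentArray([Document(text='hello world')])
# encoder.encode(docs=docs, parameters={})
# print(docs[0].embedding.shape)  # e.g. (96,) for en_core_web_sm vectors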
|
import pytest
import torch
from PIL import Image
from torchvision import datapoints
@pytest.mark.parametrize("data", [torch.rand(3, 32, 32), Image.new("RGB", (32, 32), color=123)])
def test_image_instance(data):
image = datapoints.Image(data)
assert isinstance(image, torch.Tensor)
assert image.ndim == 3 and image.shape[0] == 3
@pytest.mark.parametrize("data", [torch.randint(0, 10, size=(1, 32, 32)), Image.new("L", (32, 32), color=2)])
def test_mask_instance(data):
mask = datapoints.Mask(data)
assert isinstance(mask, torch.Tensor)
assert mask.ndim == 3 and mask.shape[0] == 1
@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 4)), [[0, 0, 5, 5], [2, 2, 7, 7]]])
@pytest.mark.parametrize(
"format", ["XYXY", "CXCYWH", datapoints.BoundingBoxFormat.XYXY, datapoints.BoundingBoxFormat.XYWH]
)
def test_bbox_instance(data, format):
bboxes = datapoints.BoundingBox(data, format=format, spatial_size=(32, 32))
assert isinstance(bboxes, torch.Tensor)
assert bboxes.ndim == 2 and bboxes.shape[1] == 4
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format.upper()]
assert bboxes.format == format
|
import pytest
import torch
from PIL import Image
from torchvision import datapoints
@pytest.mark.parametrize("data", [torch.rand(3, 32, 32), Image.new("RGB", (32, 32), color=123)])
def test_image_instance(data):
image = datapoints.Image(data)
assert isinstance(image, torch.Tensor)
assert image.ndim == 3 and image.shape[0] == 3
@pytest.mark.parametrize("data", [torch.randint(0, 10, size=(1, 32, 32)), Image.new("L", (32, 32), color=2)])
def test_mask_instance(data):
mask = datapoints.Mask(data)
assert isinstance(mask, torch.Tensor)
assert mask.ndim == 3 and mask.shape[0] == 1
@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 4)), [[0, 0, 5, 5], [2, 2, 7, 7]]])
@pytest.mark.parametrize(
"format", ["XYXY", "CXCYWH", datapoints.BoundingBoxFormat.XYXY, datapoints.BoundingBoxFormat.XYWH]
)
def test_bbox_instance(data, format):
bboxes = datapoints.BoundingBox(data, format=format, spatial_size=(32, 32))
assert isinstance(bboxes, torch.Tensor)
assert bboxes.ndim == 2 and bboxes.shape[1] == 4
if isinstance(format, str):
format = datapoints.BoundingBoxFormat.from_str(format.upper())
assert bboxes.format == format
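# Hedged sketch (torchvision "datapoints" beta API as exercised above): string
# formats are upcast to the BoundingBoxFormat enum on construction, e.g.
# box = datapoints.BoundingBox([[0, 0, 5, 5]], format="XYXY", spatial_size=(32, 32))
# assert box.format == datapoints.BoundingBoxFormat.from_str("XYXY")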
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, datasets, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Train Parameters
model_name = "bert-base-uncased"
batch_size = 8
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_tsdae{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with (
gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(filepath, encoding="utf8") as fIn
):
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info(f"{len(train_sentences)} train sentences")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, datasets, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Train Parameters
model_name = "bert-base-uncased"
batch_size = 8
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_tsdae{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info(f"{len(train_sentences)} train sentences")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
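# Hedged invocation sketch (paths and the run name are assumptions):
#   python train_tsdae_from_file.py data/sentences.txt my_run
# which stores checkpoints under output/train_tsdae-my_run-<timestamp>/.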
|
import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline
from diffusers.utils.testing_utils import (
backend_empty_cache,
numpy_cosine_similarity_distance,
require_torch_accelerator,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin
class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = LuminaText2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
supports_dduf = False
test_layerwise_casting = True
test_group_offloading = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = LuminaNextDiT2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=4,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
learn_sigma=True,
qk_norm=True,
cross_attention_dim=8,
scaling_factor=1.0,
)
torch.manual_seed(0)
vae = AutoencoderKL()
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = GemmaConfig(
head_dim=2,
hidden_size=8,
intermediate_size=37,
num_attention_heads=4,
num_hidden_layers=2,
num_key_value_heads=4,
)
text_encoder = GemmaForCausalLM(config)
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
}
return inputs
@unittest.skip("xformers attention processor does not exist for Lumina")
def test_xformers_attention_forwardGenerator_pass(self):
pass
@slow
@require_torch_accelerator
class LuminaText2ImgPipelineSlowTests(unittest.TestCase):
pipeline_class = LuminaText2ImgPipeline
repo_id = "Alpha-VLLM/Lumina-Next-SFT-diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
return {
"prompt": "A photo of a cat",
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
"generator": generator,
}
def test_lumina_inference(self):
pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload(device=torch_device)
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images[0]
image_slice = image[0, :10, :10]
expected_slice = np.array(
[
[0.17773438, 0.18554688, 0.22070312],
[0.046875, 0.06640625, 0.10351562],
[0.0, 0.0, 0.02148438],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
dtype=np.float32,
)
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4
|
import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline
from diffusers.utils.testing_utils import (
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin
class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = LuminaText2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
supports_dduf = False
test_layerwise_casting = True
test_group_offloading = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = LuminaNextDiT2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=4,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
learn_sigma=True,
qk_norm=True,
cross_attention_dim=8,
scaling_factor=1.0,
)
torch.manual_seed(0)
vae = AutoencoderKL()
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = GemmaConfig(
head_dim=2,
hidden_size=8,
intermediate_size=37,
num_attention_heads=4,
num_hidden_layers=2,
num_key_value_heads=4,
)
text_encoder = GemmaForCausalLM(config)
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
}
return inputs
@unittest.skip("xformers attention processor does not exist for Lumina")
def test_xformers_attention_forwardGenerator_pass(self):
pass
@slow
@require_torch_gpu
class LuminaText2ImgPipelineSlowTests(unittest.TestCase):
pipeline_class = LuminaText2ImgPipeline
repo_id = "Alpha-VLLM/Lumina-Next-SFT-diffusers"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
return {
"prompt": "A photo of a cat",
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
"generator": generator,
}
def test_lumina_inference(self):
pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images[0]
image_slice = image[0, :10, :10]
expected_slice = np.array(
[
[0.17773438, 0.18554688, 0.22070312],
[0.046875, 0.06640625, 0.10351562],
[0.0, 0.0, 0.02148438],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
dtype=np.float32,
)
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.ftrl import Ftrl
class FtrlTest(testing.TestCase):
def test_config(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-0.2,
initial_accumulator_value=0.4,
l1_regularization_strength=0.05,
l2_regularization_strength=0.15,
l2_shrinkage_regularization_strength=0.01,
beta=0.3,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Ftrl(learning_rate=0.5)
grads = np.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.2218, 1.3954, 2.3651, 2.8814], rtol=1e-4, atol=1e-4
)
def test_correctness_with_golden(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-0.2,
initial_accumulator_value=0.4,
l1_regularization_strength=0.05,
l2_regularization_strength=0.15,
l2_shrinkage_regularization_strength=0.01,
beta=0.3,
)
x = backend.Variable(np.ones([10]))
grads = np.arange(0.1, 1.1, 0.1)
first_grads = np.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.0034, -0.0077, -0.0118, -0.0157, -0.0194, -0.023, -0.0263, -0.0294, -0.0325, -0.0354],
[-0.0078, -0.0162, -0.0242, -0.0317, -0.0387, -0.0454, -0.0516, -0.0575, -0.0631, -0.0685],
[-0.0121, -0.0246, -0.0363, -0.0472, -0.0573, -0.0668, -0.0757, -0.0842, -0.0922, -0.0999],
[-0.0164, -0.0328, -0.0481, -0.0623, -0.0753, -0.0875, -0.099, -0.1098, -0.1201, -0.1299]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Ftrl(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Ftrl(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
def test_invalid_initial_accumulator_value(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`initial_accumulator_value` needs to be positive or zero. Received: initial_accumulator_value={invalid_value}.$",
):
Ftrl(initial_accumulator_value=invalid_value)
def test_invalid_learning_rate_power(self):
invalid_value = 0.1
with self.assertRaisesRegex(
ValueError,
f"^`learning_rate_power` needs to be negative or zero. Received: learning_rate_power={invalid_value}.$",
):
Ftrl(learning_rate_power=invalid_value)
def test_invalid_l1_regularization_strength(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`l1_regularization_strength` needs to be positive or zero. Received: l1_regularization_strength={invalid_value}.$",
):
Ftrl(l1_regularization_strength=invalid_value)
def test_invalid_l2_regularization_strength(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`l2_regularization_strength` needs to be positive or zero. Received: l2_regularization_strength={invalid_value}.$",
):
Ftrl(l2_regularization_strength=invalid_value)
def test_invalid_l2_shrinkage_regularization_strength(self):
invalid_value = -0.1
with self.assertRaisesRegex(
ValueError,
f"^`l2_shrinkage_regularization_strength` needs to be positive or zero. Received: l2_shrinkage_regularization_strength={invalid_value}.$",
):
Ftrl(l2_shrinkage_regularization_strength=invalid_value)
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.ftrl import Ftrl
class FtrlTest(testing.TestCase):
def test_config(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-0.2,
initial_accumulator_value=0.4,
l1_regularization_strength=0.05,
l2_regularization_strength=0.15,
l2_shrinkage_regularization_strength=0.01,
beta=0.3,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Ftrl(learning_rate=0.5)
grads = np.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.2218, 1.3954, 2.3651, 2.8814], rtol=1e-4, atol=1e-4
)
def test_correctness_with_golden(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-0.2,
initial_accumulator_value=0.4,
l1_regularization_strength=0.05,
l2_regularization_strength=0.15,
l2_shrinkage_regularization_strength=0.01,
beta=0.3,
)
x = backend.Variable(np.ones([10]))
grads = np.arange(0.1, 1.1, 0.1)
first_grads = np.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.0034, -0.0077, -0.0118, -0.0157, -0.0194, -0.023, -0.0263, -0.0294, -0.0325, -0.0354],
[-0.0078, -0.0162, -0.0242, -0.0317, -0.0387, -0.0454, -0.0516, -0.0575, -0.0631, -0.0685],
[-0.0121, -0.0246, -0.0363, -0.0472, -0.0573, -0.0668, -0.0757, -0.0842, -0.0922, -0.0999],
[-0.0164, -0.0328, -0.0481, -0.0623, -0.0753, -0.0875, -0.099, -0.1098, -0.1201, -0.1299]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Ftrl(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Ftrl(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
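# Arithmetic behind test_clip_norm above: clipnorm=1 rescales the gradient
# [100, 100] to unit L2 norm. ||[100, 100]|| = 100*sqrt(2), so each component
# becomes 100 / (100*sqrt(2)) = 1/sqrt(2) = 2**0.5 / 2.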
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
CSR Loss module that combines Reconstruction Loss and Sparse Multiple Negatives Ranking Loss.
Based on the paper:
Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation, https://arxiv.org/abs/2503.01776
This module computes the combined loss according to the formula:
L_CSR = L_recon + γ * L_MRL
where:
- L_recon = L(k) + L(4k)/8 + β*L_aux
- L_MRL is the Multiple Negatives Ranking Loss
"""
def __init__(
self,
model: SparseEncoder,
beta: float = 0.1,
gamma: float = 1.0,
scale: float = 20.0,
):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = ReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self,
sentence_features: Iterable[dict[str, torch.Tensor]],
labels: torch.Tensor = None,
) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sentence_embedding = [output["sentence_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sentence_embedding)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
"gamma": self.gamma,
"scale": self.scale,
}
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
CSR Loss module that combines Reconstruction Loss and Sparse Multiple Negatives Ranking Loss.
Based on the paper:
Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation, https://arxiv.org/abs/2503.01776
This module computes the combined loss according to the formula:
L_CSR = L_recon + γ * L_MRL
where:
- L_recon = L(k) + L(4k)/8 + β*L_aux
- L_MRL is the Multiple Negatives Ranking Loss
"""
def __init__(
self,
model: SparseEncoder,
beta: float = 0.1,
gamma: float = 1.0,
scale: float = 20.0,
):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = ReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self,
sentence_features: Iterable[dict[str, torch.Tensor]],
labels: torch.Tensor = None,
) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sparse_embeddings = [output["sparse_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sparse_embeddings)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
"gamma": self.gamma,
"scale": self.scale,
}
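# Hedged usage sketch, grounded in the constructor signature above (the model
# name is an assumption, and the checkpoint must be CSR-compatible):
# from sentence_transformers import SparseEncoder
# model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# loss = CSRLoss(model, beta=0.1, gamma=1.0, scale=20.0)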
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Sparsity Stats Query : Row Non-Zero Mean: 62.97999954223633, Row Sparsity Mean: 0.9979365468025208
Model Sparsity Stats Corpus : Row Non-Zero Mean: 63.39932632446289, Row Sparsity Mean: 0.9979228377342224
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Sparsity Stats Query : Row Non-Zero Mean: 48.08000183105469, Row Sparsity Mean: 0.9984247088432312
Model Sparsity Stats Corpus : Row Non-Zero Mean: 125.3604965209961, Row Sparsity Mean: 0.9958928227424622
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Sparsity Stats Query : Row Non-Zero Mean: 55.53000068664551, Row Sparsity Mean: 0.998180627822876
Model Sparsity Stats Corpus : Row Non-Zero Mean: 94.37991142272949, Row Sparsity Mean: 0.9969078302383423
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Query info: num_rows: 50, num_cols: 30522, row_non_zero_mean: 62.97999954223633, row_sparsity_mean: 0.9979365468025208 1/1 [00:04<00:00, 4.12s/it]
Corpus info: num_rows: 5046, num_cols: 30522, row_non_zero_mean: 63.394371032714844, row_sparsity_mean: 0.9979230165481567
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Evaluating NanoMSMARCO
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Query info: num_rows: 50, num_cols: 30522, row_non_zero_mean: 48.099998474121094, row_sparsity_mean: 0.9984239935874939 1/1 [00:19<00:00, 19.40s/it]
Corpus info: num_rows: 5043, num_cols: 30522, row_non_zero_mean: 125.38131713867188, row_sparsity_mean: 0.9958921670913696
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Average Querie: num_rows: 50.0, num_cols: 30522.0, row_non_zero_mean: 55.53999900817871, row_sparsity_mean: 0.9981802701950073
Average Corpus: num_rows: 5044.5, num_cols: 30522.0, row_non_zero_mean: 94.38784408569336, row_sparsity_mean: 0.9969075918197632
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_spatial_size, is_simple_tensor
def query_bounding_boxes(flat_inputs: List[Any]) -> datapoints.BoundingBoxes:
bounding_boxes = [inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBoxes)]
if not bounding_boxes:
raise TypeError("No bounding box was found in the sample")
elif len(bounding_boxes) > 1:
raise ValueError("Found multiple bounding boxes in the sample")
return bounding_boxes.pop()
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if isinstance(inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video)) or is_simple_tensor(inpt)
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_spatial_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_spatial_size(inpt))
for inpt in flat_inputs
if isinstance(
inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video, datapoints.Mask, datapoints.BoundingBoxes)
)
or is_simple_tensor(inpt)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
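# Usage sketch (illustrative): given a flattened sample, gate work on the types
# it contains before querying its dimensions:
#   if has_any(flat_inputs, datapoints.Image, datapoints.Video, is_simple_tensor):
#       c, h, w = query_chw(flat_inputs)
#   if has_all(flat_inputs, datapoints.Image, datapoints.BoundingBoxes):
#       h, w = query_spatial_size(flat_inputs)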
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_spatial_size, is_simple_tensor
def query_bounding_box(flat_inputs: List[Any]) -> datapoints.BoundingBox:
bounding_boxes = [inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBox)]
if not bounding_boxes:
raise TypeError("No bounding box was found in the sample")
elif len(bounding_boxes) > 1:
raise ValueError("Found multiple bounding boxes in the sample")
return bounding_boxes.pop()
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if isinstance(inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video)) or is_simple_tensor(inpt)
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_spatial_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_spatial_size(inpt))
for inpt in flat_inputs
if isinstance(
inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video, datapoints.Mask, datapoints.BoundingBox)
)
or is_simple_tensor(inpt)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf-8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf-8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyHubExecutor']
)
def test_cli_hub(uses):
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', uses])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port[0]}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port[0]}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina.constants import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo'), encoding='utf-8') as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyHubExecutor']
)
def test_cli_hub(uses):
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', uses])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port[0]}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port[0]}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina.constants import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the file make_multilingual.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import csv
import gzip
import os
from tqdm.autonotebook import tqdm
import sentence_transformers.util
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, f"talks-{source_lang}-{target_lang}-train.tsv.gz"
)
output_filename_dev = os.path.join(parallel_sentences_folder, f"talks-{source_lang}-{target_lang}-dev.tsv.gz")
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write(f"{src_text}\t{trg_text}\n")
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the file make_multilingual.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import csv
import gzip
import os
from tqdm.autonotebook import tqdm
import sentence_transformers.util
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, "talks-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
output_filename_dev = os.path.join(
parallel_sentences_folder, "talks-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write("{}\t{}\n".format(src_text, trg_text))
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
from .flair_text import FlairTextEncoder
|
from .flair_text import FlairTextEncoder
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[Image] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
da3 = DocumentArray[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
|
import pytest
from typing import Optional, List, Dict, Set
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
l: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[Image] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, l=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, l=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.l == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
da3 = DocumentArray[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.l == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, get_max_musa_memory,
is_cuda_available, is_dipu_available, is_mlu_available,
is_mps_available, is_musa_available, is_npu_available,
is_npu_support_full_precision)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available', 'is_mps_available', 'is_npu_available',
'is_dipu_available', 'get_max_musa_memory', 'is_musa_available',
'is_npu_support_full_precision'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_dipu_available, is_mlu_available, is_mps_available,
is_npu_available, is_npu_support_full_precision)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available', 'is_mps_available', 'is_npu_available',
'is_dipu_available', 'is_npu_support_full_precision'
]
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url: str):
mesh = Mesh3D(url=file_url)
mesh.tensors = mesh.url.load()
assert isinstance(mesh.tensors.vertices, np.ndarray)
assert isinstance(mesh.tensors.faces, np.ndarray)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_str_init():
t = parse_obj_as(Mesh3D, 'http://hello.ply')
assert t.url == 'http://hello.ply'
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_doc():
class MyDoc(BaseDoc):
mesh1: Mesh3D
mesh2: Mesh3D
doc = MyDoc(mesh1='http://hello.ply', mesh2=Mesh3D(url='http://hello.ply'))
assert doc.mesh1.url == 'http://hello.ply'
assert doc.mesh2.url == 'http://hello.ply'
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url):
mesh = Mesh3D(url=file_url)
mesh.tensors = mesh.url.load()
assert isinstance(mesh.tensors.vertices, np.ndarray)
assert isinstance(mesh.tensors.faces, np.ndarray)
def test_str_init():
t = parse_obj_as(Mesh3D, 'http://hello.ply')
assert t.url == 'http://hello.ply'
def test_doc():
class MyDoc(BaseDoc):
mesh1: Mesh3D
mesh2: Mesh3D
doc = MyDoc(mesh1='http://hello.ply', mesh2=Mesh3D(url='http://hello.ply'))
assert doc.mesh1.url == 'http://hello.ply'
assert doc.mesh2.url == 'http://hello.ply'
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
:param dataloader:
the data for the evaluation
"""
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
def __call__(self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
return accuracy
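# Usage sketch (names are illustrative): the softmax head trained alongside the
# model, e.g. a SoftmaxLoss instance, is passed as `softmax_model`:
#   evaluator = LabelAccuracyEvaluator(dev_dataloader, name="dev", softmax_model=train_loss)
#   accuracy = evaluator(model, output_path="output/")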
|
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
:param dataloader:
the data for the evaluation
"""
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
return accuracy
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .visualization_hook import DetVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook'
]
|
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
'This Deployment will not be context managed by the Flow.',
)
gp.add_argument(
'--grpc-metadata',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The metadata to be passed to the gRPC request.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
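# Usage sketch (assumes jina's parser helpers behave as above):
#   parser = argparse.ArgumentParser()
#   mixin_base_deployment_parser(parser)
#   args = parser.parse_args(['--external', '--tls'])
#   assert args.external and args.tls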
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
'This Deployment will not be context managed by the Flow.',
)
gp.add_argument(
'--grpc-metadata',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The metadata to be passed to the gRPC request.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
|
"""Agent utils."""
from typing import List
from llama_index.core.agent.types import TaskStep
from llama_index.core.base.llms.types import ChatMessage, MessageRole, TextBlock
from llama_index.core.memory import BaseMemory
def add_user_step_to_memory(
step: TaskStep, memory: BaseMemory, verbose: bool = False
) -> None:
"""Add user step to memory."""
user_message = ChatMessage(content=step.input, role=MessageRole.USER)
memory.put(user_message)
if verbose:
print(f"Added user message to memory: {step.input}")
def messages_to_xml_format(messages: List[ChatMessage]) -> ChatMessage:
blocks = [TextBlock(text="<current_conversation>\n")]
for message in messages:
blocks.append(TextBlock(text=f"\t<{message.role.value}>\n"))
for block in message.blocks:
if isinstance(block, TextBlock):
blocks.append(TextBlock(text=f"\t\t<message>{block.text}</message>\n"))
blocks.append(TextBlock(text=f"\t</{message.role.value}>\n"))
blocks.append(TextBlock(text="</current_conversation>\n\n"))
blocks.append(
TextBlock(
text="Given the conversation, format the output according to the provided schema."
)
)
return ChatMessage(role="user", blocks=blocks)
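# Usage sketch (illustrative): wrap a short history into the XML prompt format:
#   history = [
#       ChatMessage(role=MessageRole.USER, content="hi"),
#       ChatMessage(role=MessageRole.ASSISTANT, content="hello"),
#   ]
#   xml_message = messages_to_xml_format(history)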
|
"""Agent utils."""
from llama_index.core.agent.types import TaskStep
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory import BaseMemory
def add_user_step_to_memory(
step: TaskStep, memory: BaseMemory, verbose: bool = False
) -> None:
"""Add user step to memory."""
user_message = ChatMessage(content=step.input, role=MessageRole.USER)
memory.put(user_message)
if verbose:
print(f"Added user message to memory: {step.input}")
|
"""Embedded Tables Retriever w/ Unstructured.IO."""
import os
import pickle
from pathlib import Path
from typing import Any, Dict, Optional
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import UnstructuredElementNodeParser
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.readers.file.flat import FlatReader
class EmbeddedTablesUnstructuredRetrieverPack(BaseLlamaPack):
"""
Embedded Tables + Unstructured.io Retriever pack.
Use unstructured.io to parse out embedded tables from an HTML document, build
a node graph, and then run our recursive retriever against that.
**NOTE**: must take in a single HTML file.
"""
def __init__(
self,
html_path: str,
nodes_save_path: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self.reader = FlatReader()
docs = self.reader.load_data(Path(html_path))
self.node_parser = UnstructuredElementNodeParser()
        if nodes_save_path is None or not os.path.exists(nodes_save_path):
            raw_nodes = self.node_parser.get_nodes_from_documents(docs)
            if nodes_save_path is not None:
                # only cache the parsed nodes when a save path was given
                with open(nodes_save_path, "wb") as f:
                    pickle.dump(raw_nodes, f)
        else:
            with open(nodes_save_path, "rb") as f:
                raw_nodes = pickle.load(f)
base_nodes, node_mappings = self.node_parser.get_base_nodes_and_mappings(
raw_nodes
)
# construct top-level vector index + query engine
vector_index = VectorStoreIndex(base_nodes)
vector_retriever = vector_index.as_retriever(similarity_top_k=1)
self.recursive_retriever = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever},
node_dict=node_mappings,
verbose=True,
)
self.query_engine = RetrieverQueryEngine.from_args(self.recursive_retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"node_parser": self.node_parser,
"recursive_retriever": self.recursive_retriever,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
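# Usage sketch (path and query are illustrative):
#   pack = EmbeddedTablesUnstructuredRetrieverPack(
#       "report.html", nodes_save_path="report_nodes.pkl"
#   )
#   response = pack.run("What values appear in the embedded table?")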
|
"""Embedded Tables Retriever w/ Unstructured.IO."""
import os
import pickle
from pathlib import Path
from typing import Any, Dict, Optional
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import UnstructuredElementNodeParser
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.readers.file.flat import FlatReader
class EmbeddedTablesUnstructuredRetrieverPack(BaseLlamaPack):
"""Embedded Tables + Unstructured.io Retriever pack.
Use unstructured.io to parse out embedded tables from an HTML document, build
a node graph, and then run our recursive retriever against that.
**NOTE**: must take in a single HTML file.
"""
def __init__(
self,
html_path: str,
nodes_save_path: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self.reader = FlatReader()
docs = self.reader.load_data(Path(html_path))
self.node_parser = UnstructuredElementNodeParser()
        if nodes_save_path is None or not os.path.exists(nodes_save_path):
            raw_nodes = self.node_parser.get_nodes_from_documents(docs)
            if nodes_save_path is not None:
                # only cache the parsed nodes when a save path was given
                with open(nodes_save_path, "wb") as f:
                    pickle.dump(raw_nodes, f)
        else:
            with open(nodes_save_path, "rb") as f:
                raw_nodes = pickle.load(f)
base_nodes, node_mappings = self.node_parser.get_base_nodes_and_mappings(
raw_nodes
)
# construct top-level vector index + query engine
vector_index = VectorStoreIndex(base_nodes)
vector_retriever = vector_index.as_retriever(similarity_top_k=1)
self.recursive_retriever = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever},
node_dict=node_mappings,
verbose=True,
)
self.query_engine = RetrieverQueryEngine.from_args(self.recursive_retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"node_parser": self.node_parser,
"recursive_retriever": self.recursive_retriever,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: add_voter.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0f\x61\x64\x64_voter.proto\"-\n\rAwaitResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x04\"\x10\n\x0e\x46orgetResponse\"!\n\x06\x46uture\x12\x17\n\x0foperation_token\x18\x01 \x01(\t\"F\n\x0f\x41\x64\x64VoterRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x16\n\x0eprevious_index\x18\x03 \x01(\x04\x32~\n\tRaftAdmin\x12\'\n\x08\x41\x64\x64Voter\x12\x10.AddVoterRequest\x1a\x07.Future\"\x00\x12\"\n\x05\x41wait\x12\x07.Future\x1a\x0e.AwaitResponse\"\x00\x12$\n\x06\x46orget\x12\x07.Future\x1a\x0f.ForgetResponse\"\x00\x62\x06proto3'
)
_AWAITRESPONSE = DESCRIPTOR.message_types_by_name['AwaitResponse']
_FORGETRESPONSE = DESCRIPTOR.message_types_by_name['ForgetResponse']
_FUTURE = DESCRIPTOR.message_types_by_name['Future']
_ADDVOTERREQUEST = DESCRIPTOR.message_types_by_name['AddVoterRequest']
AwaitResponse = _reflection.GeneratedProtocolMessageType(
'AwaitResponse',
(_message.Message,),
{
'DESCRIPTOR': _AWAITRESPONSE,
'__module__': 'add_voter_pb2',
# @@protoc_insertion_point(class_scope:AwaitResponse)
},
)
_sym_db.RegisterMessage(AwaitResponse)
ForgetResponse = _reflection.GeneratedProtocolMessageType(
'ForgetResponse',
(_message.Message,),
{
'DESCRIPTOR': _FORGETRESPONSE,
'__module__': 'add_voter_pb2',
# @@protoc_insertion_point(class_scope:ForgetResponse)
},
)
_sym_db.RegisterMessage(ForgetResponse)
Future = _reflection.GeneratedProtocolMessageType(
'Future',
(_message.Message,),
{
'DESCRIPTOR': _FUTURE,
'__module__': 'add_voter_pb2',
# @@protoc_insertion_point(class_scope:Future)
},
)
_sym_db.RegisterMessage(Future)
AddVoterRequest = _reflection.GeneratedProtocolMessageType(
'AddVoterRequest',
(_message.Message,),
{
'DESCRIPTOR': _ADDVOTERREQUEST,
'__module__': 'add_voter_pb2',
# @@protoc_insertion_point(class_scope:AddVoterRequest)
},
)
_sym_db.RegisterMessage(AddVoterRequest)
_RAFTADMIN = DESCRIPTOR.services_by_name['RaftAdmin']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_AWAITRESPONSE._serialized_start = 19
_AWAITRESPONSE._serialized_end = 64
_FORGETRESPONSE._serialized_start = 66
_FORGETRESPONSE._serialized_end = 82
_FUTURE._serialized_start = 84
_FUTURE._serialized_end = 117
_ADDVOTERREQUEST._serialized_start = 119
_ADDVOTERREQUEST._serialized_end = 189
_RAFTADMIN._serialized_start = 191
_RAFTADMIN._serialized_end = 317
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: add_voter.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0f\x61\x64\x64_voter.proto\"-\n\rAwaitResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x04\"\x10\n\x0e\x46orgetResponse\"!\n\x06\x46uture\x12\x17\n\x0foperation_token\x18\x01 \x01(\t\"F\n\x0f\x41\x64\x64VoterRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x16\n\x0eprevious_index\x18\x03 \x01(\x04\x32~\n\tRaftAdmin\x12\'\n\x08\x41\x64\x64Voter\x12\x10.AddVoterRequest\x1a\x07.Future\"\x00\x12\"\n\x05\x41wait\x12\x07.Future\x1a\x0e.AwaitResponse\"\x00\x12$\n\x06\x46orget\x12\x07.Future\x1a\x0f.ForgetResponse\"\x00\x62\x06proto3')
_AWAITRESPONSE = DESCRIPTOR.message_types_by_name['AwaitResponse']
_FORGETRESPONSE = DESCRIPTOR.message_types_by_name['ForgetResponse']
_FUTURE = DESCRIPTOR.message_types_by_name['Future']
_ADDVOTERREQUEST = DESCRIPTOR.message_types_by_name['AddVoterRequest']
AwaitResponse = _reflection.GeneratedProtocolMessageType('AwaitResponse', (_message.Message,), {
'DESCRIPTOR' : _AWAITRESPONSE,
'__module__' : 'add_voter_pb2'
# @@protoc_insertion_point(class_scope:AwaitResponse)
})
_sym_db.RegisterMessage(AwaitResponse)
ForgetResponse = _reflection.GeneratedProtocolMessageType('ForgetResponse', (_message.Message,), {
'DESCRIPTOR' : _FORGETRESPONSE,
'__module__' : 'add_voter_pb2'
# @@protoc_insertion_point(class_scope:ForgetResponse)
})
_sym_db.RegisterMessage(ForgetResponse)
Future = _reflection.GeneratedProtocolMessageType('Future', (_message.Message,), {
'DESCRIPTOR' : _FUTURE,
'__module__' : 'add_voter_pb2'
# @@protoc_insertion_point(class_scope:Future)
})
_sym_db.RegisterMessage(Future)
AddVoterRequest = _reflection.GeneratedProtocolMessageType('AddVoterRequest', (_message.Message,), {
'DESCRIPTOR' : _ADDVOTERREQUEST,
'__module__' : 'add_voter_pb2'
# @@protoc_insertion_point(class_scope:AddVoterRequest)
})
_sym_db.RegisterMessage(AddVoterRequest)
_RAFTADMIN = DESCRIPTOR.services_by_name['RaftAdmin']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_AWAITRESPONSE._serialized_start=19
_AWAITRESPONSE._serialized_end=64
_FORGETRESPONSE._serialized_start=66
_FORGETRESPONSE._serialized_end=82
_FUTURE._serialized_start=84
_FUTURE._serialized_end=117
_ADDVOTERREQUEST._serialized_start=119
_ADDVOTERREQUEST._serialized_end=189
_RAFTADMIN._serialized_start=191
_RAFTADMIN._serialized_end=317
# @@protoc_insertion_point(module_scope)
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.autograd import Function
from torch.nn import functional as F
class SigmoidGeometricMean(Function):
"""Forward and backward function of geometric mean of two sigmoid
functions.
    This implementation with an analytical gradient function substitutes
    the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The
    original implementation incurs NaN during gradient backpropagation
    if both x and y are very small values.
"""
@staticmethod
def forward(ctx, x, y):
x_sigmoid = x.sigmoid()
y_sigmoid = y.sigmoid()
z = (x_sigmoid * y_sigmoid).sqrt()
ctx.save_for_backward(x_sigmoid, y_sigmoid, z)
return z
@staticmethod
def backward(ctx, grad_output):
x_sigmoid, y_sigmoid, z = ctx.saved_tensors
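        # For z = sqrt(sigmoid(x) * sigmoid(y)), the analytical derivative is
        # dz/dx = z * (1 - sigmoid(x)) / 2 (and symmetrically for y), which
        # stays finite even when x and y are both very negative.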
grad_x = grad_output * z * (1 - x_sigmoid) / 2
grad_y = grad_output * z * (1 - y_sigmoid) / 2
return grad_x, grad_y
sigmoid_geometric_mean = SigmoidGeometricMean.apply
def interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` to the shape of the `target`.
The `source` must be a Tensor, but the `target` can be a Tensor or a
np.ndarray with the shape (..., target_h, target_w).
Args:
source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or
(N, C, H, W).
target (Tensor | np.ndarray): The interpolation target with the shape
(..., target_h, target_w).
mode (str): Algorithm used for interpolation. The options are the
same as those in F.interpolate(). Default: ``'bilinear'``.
align_corners (bool): The same as the argument in F.interpolate().
Returns:
Tensor: The interpolated source Tensor.
"""
assert len(target.shape) >= 2
def _interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` (4D) to the shape of the `target`."""
target_h, target_w = target.shape[-2:]
source_h, source_w = source.shape[-2:]
if target_h != source_h or target_w != source_w:
source = F.interpolate(
source,
size=(target_h, target_w),
mode=mode,
align_corners=align_corners)
return source
if len(source.shape) == 3:
source = source[:, None, :, :]
source = _interpolate_as(source, target, mode, align_corners)
return source[:, 0, :, :]
else:
return _interpolate_as(source, target, mode, align_corners)
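# --- Illustrative usage sketch (an addition, not part of the upstream file) ---
# Resizing a 3D (N, H, W) score map to the spatial size of a 4D target;
# the shapes below are made up for demonstration.
if __name__ == '__main__':
    import torch

    source = torch.rand(2, 16, 16)      # (N, H, W)
    target = torch.rand(2, 3, 32, 32)   # (N, C, H, W)
    out = interpolate_as(source, target)
    assert out.shape == (2, 32, 32)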
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn import functional as F
def interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` to the shape of the `target`.
The `source` must be a Tensor, but the `target` can be a Tensor or a
np.ndarray with the shape (..., target_h, target_w).
Args:
source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or
(N, C, H, W).
target (Tensor | np.ndarray): The interpolation target with the shape
(..., target_h, target_w).
mode (str): Algorithm used for interpolation. The options are the
same as those in F.interpolate(). Default: ``'bilinear'``.
align_corners (bool): The same as the argument in F.interpolate().
Returns:
Tensor: The interpolated source Tensor.
"""
assert len(target.shape) >= 2
def _interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` (4D) to the shape of the `target`."""
target_h, target_w = target.shape[-2:]
source_h, source_w = source.shape[-2:]
if target_h != source_h or target_w != source_w:
source = F.interpolate(
source,
size=(target_h, target_w),
mode=mode,
align_corners=align_corners)
return source
if len(source.shape) == 3:
source = source[:, None, :, :]
source = _interpolate_as(source, target, mode, align_corners)
return source[:, 0, :, :]
else:
return _interpolate_as(source, target, mode, align_corners)
|
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.service import ServiceManager, ServiceNotFoundError
from llama_index.core.workflow.workflow import Workflow
class ServiceWorkflow(Workflow):
"""This wokflow is only responsible to generate a number, it knows nothing about the caller."""
def __init__(self, *args, **kwargs) -> None:
self._the_answer = kwargs.pop("the_answer", 42)
super().__init__(*args, **kwargs)
@step
async def generate(self, ev: StartEvent) -> StopEvent:
return StopEvent(result=self._the_answer)
class NumGenerated(Event):
"""To be used in the dummy workflow below."""
num: int
class DummyWorkflow(Workflow):
"""
This workflow needs a number, and it calls another workflow to get one.
A service named "service_workflow" must be added to `DummyWorkflow` for
the step to be able to use it (see below).
This step knows nothing about the other workflow, it gets an instance
and it only knows it has to call `run` on that instance.
"""
@step
async def get_a_number(
self,
ev: StartEvent,
ctx: Context,
service_workflow: ServiceWorkflow = ServiceWorkflow(),
) -> NumGenerated:
res = await service_workflow.run()
return NumGenerated(num=int(res))
@step
async def multiply(self, ev: NumGenerated) -> StopEvent:
return StopEvent(ev.num * 2)
@pytest.mark.asyncio()
async def test_e2e():
wf = DummyWorkflow()
# We are responsible for passing the ServiceWorkflow instances to the dummy workflow
    # and giving it a name, in this case "service_workflow"
wf.add_workflows(service_workflow=ServiceWorkflow(the_answer=1337))
res = await wf.run()
assert res == 2674
@pytest.mark.asyncio()
async def test_default_value_for_service():
wf = DummyWorkflow()
# We don't add any workflow to leverage the default value defined by the user
res = await wf.run()
assert res == 84
def test_service_manager_add(workflow):
s = ServiceManager()
s.add("test_id", workflow)
assert s._services["test_id"] == workflow
def test_service_manager_get(workflow):
s = ServiceManager()
s._services["test_id"] = workflow
assert s.get("test_id") == workflow
with pytest.raises(ServiceNotFoundError):
s.get("not_found")
|
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.service import ServiceManager, ServiceNotFoundError
class ServiceWorkflow(Workflow):
"""This wokflow is only responsible to generate a number, it knows nothing about the caller."""
def __init__(self, *args, **kwargs) -> None:
self._the_answer = kwargs.pop("the_answer", 42)
super().__init__(*args, **kwargs)
@step
async def generate(self, ev: StartEvent) -> StopEvent:
return StopEvent(result=self._the_answer)
class NumGenerated(Event):
"""To be used in the dummy workflow below."""
num: int
class DummyWorkflow(Workflow):
"""
This workflow needs a number, and it calls another workflow to get one.
A service named "service_workflow" must be added to `DummyWorkflow` for
the step to be able to use it (see below).
This step knows nothing about the other workflow, it gets an instance
and it only knows it has to call `run` on that instance.
"""
@step
async def get_a_number(
self,
ev: StartEvent,
ctx: Context,
service_workflow: ServiceWorkflow = ServiceWorkflow(),
) -> NumGenerated:
res = await service_workflow.run()
return NumGenerated(num=int(res))
@step
async def multiply(self, ev: NumGenerated) -> StopEvent:
return StopEvent(ev.num * 2)
@pytest.mark.asyncio()
async def test_e2e():
wf = DummyWorkflow()
# We are responsible for passing the ServiceWorkflow instances to the dummy workflow
    # and giving it a name, in this case "service_workflow"
wf.add_workflows(service_workflow=ServiceWorkflow(the_answer=1337))
res = await wf.run()
assert res == 2674
@pytest.mark.asyncio()
async def test_default_value_for_service():
wf = DummyWorkflow()
# We don't add any workflow to leverage the default value defined by the user
res = await wf.run()
assert res == 84
def test_service_manager_add():
s = ServiceManager()
w = Workflow()
s.add("test_id", w)
assert s._services["test_id"] == w
def test_service_manager_get():
s = ServiceManager()
w = Workflow()
s._services["test_id"] = w
assert s.get("test_id") == w
with pytest.raises(ServiceNotFoundError):
s.get("not_found")
|
# there's a rather large issue with the pants build: it only runs tests
# whose sources are imported, which causes pytest markers to not be
# registered, so we need to import pytest_asyncio manually here to ensure
# that the markers are registered
import pytest_asyncio # noqa: F401
# Set the default fixture loop scope explicitly to avoid warnings
pytest_asyncio.DEFAULT_FIXTURE_LOOP_SCOPE = "function"
|
# there's a rather large issue with the pants build: it only runs tests
# whose sources are imported, which causes pytest markers to not be
# registered, so we need to import pytest_asyncio manually here to ensure
# that the markers are registered
import pytest_asyncio # noqa: F401
|
import multiprocessing
import re
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING: # pragma: no cover
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
    that gets triggered when any of the events provided as input is triggered (OR logic).
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
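# --- Illustrative usage sketch (an addition, not part of the upstream file) ---
# ConditionalEvent composes child events with OR logic: the patched set()
# and clear() re-evaluate the composite state on every change.
def _demo_conditional_event():
    e1 = multiprocessing.Event()
    e2 = multiprocessing.Event()
    composite = ConditionalEvent([e1, e2])
    assert not composite.event.is_set()
    e1.set()  # setting any child sets the composite event
    assert composite.event.is_set()
    e1.clear()  # clearing the last set child clears it again
    assert not composite.event.is_set()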
def update_runtime_cls(args) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:return: runtime class as a string
"""
_args = args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol') and _args.pod_role == PodRoleType.GATEWAY:
_update_gateway_args(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
|
import multiprocessing
import re
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING: # pragma: no cover
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
    that gets triggered when any of the events provided as input is triggered (OR logic).
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
def update_runtime_cls(args, copy=False) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:param copy: True if args shouldn't be modified in-place
:return: runtime class as a string
"""
_args = deepcopy(args) if copy else args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol') and _args.pod_role == PodRoleType.GATEWAY:
_update_gateway_args(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import import_library
from docarray.utils._internal.pydantic import is_pydantic_v2
if is_pydantic_v2:
from pydantic import model_validator
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='ImageDoc')
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain:
- an [`ImageUrl`][docarray.typing.url.ImageUrl] (`Image.url`)
- an [`ImageTensor`](../../../api_references/typing/tensor/image) (`Image.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`Image.embedding`)
- an [`ImageBytes`][docarray.typing.bytes.ImageBytes] object (`ImageDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
```
You can extend this Document:
```python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
# image.second_embedding = model(image.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
```
"""
url: Optional[ImageUrl] = Field(
description='URL to a (potentially remote) image file that needs to be loaded',
example='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true',
default=None,
)
tensor: Optional[ImageTensor] = Field(
        description='Tensor object of the image which can be specified as one of `ImageNdArray`, `ImageTorchTensor`, `ImageTensorflowTensor`.',
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the image.',
example=[1, 0, 1],
default=None,
)
bytes_: Optional[ImageBytes] = Field(
description='Bytes object of the image which is an instance of `ImageBytes`.',
default=None,
)
@classmethod
def _validate(cls, value) -> Dict[str, Any]:
if isinstance(value, str):
value = dict(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch is not None and isinstance(value, torch.Tensor))
or (tf is not None and isinstance(value, tf.Tensor))
):
value = dict(tensor=value)
elif isinstance(value, bytes):
            value = dict(bytes_=value)
return value
if is_pydantic_v2:
@model_validator(mode='before')
@classmethod
def validate_model_before(cls, value):
return cls._validate(value)
else:
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
return super().validate(cls._validate(value))
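# --- Illustrative usage sketch (an addition, not part of the upstream file) ---
# The validator above coerces bare values into the matching field: a str
# becomes `url`, a tensor/ndarray becomes `tensor`, raw bytes become
# `bytes_`. The URL and byte values below are made up for demonstration.
def _demo_imagedoc_coercion():
    assert ImageDoc._validate('https://example.com/apple.png') == {
        'url': 'https://example.com/apple.png'
    }
    assert 'tensor' in ImageDoc._validate(np.zeros((8, 8, 3)))
    assert 'bytes_' in ImageDoc._validate(b'\x89PNG')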
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='ImageDoc')
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain:
- an [`ImageUrl`][docarray.typing.url.ImageUrl] (`Image.url`)
- an [`ImageTensor`](../../../api_references/typing/tensor/image) (`Image.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`Image.embedding`)
- an [`ImageBytes`][docarray.typing.bytes.ImageBytes] object (`ImageDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
```
You can extend this Document:
```python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
# image.second_embedding = model(image.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
```
"""
url: Optional[ImageUrl] = Field(
description='URL to a (potentially remote) image file that needs to be loaded',
example='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true',
default=None,
)
tensor: Optional[ImageTensor] = Field(
        description='Tensor object of the image which can be specified as one of `ImageNdArray`, `ImageTorchTensor`, `ImageTensorflowTensor`.',
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the image.',
example=[1, 0, 1],
default=None,
)
bytes_: Optional[ImageBytes] = Field(
description='Bytes object of the image which is an instance of `ImageBytes`.',
default=None,
)
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch is not None and isinstance(value, torch.Tensor))
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes_=value)
return super().validate(value)
|
"""
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples. Note you can
increase this to label more than 30 by changing `max_iterations`. Labeling
more than 30 can be useful to get a sense for the speed of convergence of
this active learning technique.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn import datasets
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.semi_supervised import LabelSpreading
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 40
max_iterations = 5
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(max_iterations):
if len(unlabeled_indices) == 0:
print("No unlabeled items left to label.")
break
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = LabelSpreading(gamma=0.25, max_iter=20)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Iteration %i %s" % (i, 70 * "_"))
print(
"Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)
)
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# select up to 5 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[::-1]
uncertainty_index = uncertainty_index[
np.isin(uncertainty_index, unlabeled_indices)
][:5]
# keep track of indices that we get labels for
delete_indices = np.array([], dtype=int)
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
f.text(
0.05,
(1 - (i + 1) * 0.183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10),
size=10,
)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r, interpolation="none")
sub.set_title(
"predict: %i\ntrue: %i"
% (lp_model.transduction_[image_index], y[image_index]),
size=10,
)
sub.axis("off")
# labeling 5 points, remote from labeled set
(delete_index,) = (unlabeled_indices == image_index).nonzero()
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += len(uncertainty_index)
f.suptitle(
(
"Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model."
),
y=1.15,
)
plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9, wspace=0.2, hspace=0.85)
plt.show()
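# --- Illustrative sketch (an addition, not part of the upstream example) ---
# The selection step above in isolation: rank unlabeled points by the entropy
# of their transduced label distributions and keep the top k. The arguments
# stand in for `lp_model.label_distributions_` and `unlabeled_indices`.
def _top_k_uncertain(label_distributions, unlabeled_indices, k=5):
    entropies = stats.distributions.entropy(label_distributions.T)
    order = np.argsort(entropies)[::-1]  # most uncertain first
    return order[np.isin(order, unlabeled_indices)][:k]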
|
"""
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples. Note you can
increase this to label more than 30 by changing `max_iterations`. Labeling
more than 30 can be useful to get a sense for the speed of convergence of
this active learning technique.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn import datasets
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.semi_supervised import LabelSpreading
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 40
max_iterations = 5
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(max_iterations):
if len(unlabeled_indices) == 0:
print("No unlabeled items left to label.")
break
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = LabelSpreading(gamma=0.25, max_iter=20)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Iteration %i %s" % (i, 70 * "_"))
print(
"Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)
)
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# select up to 5 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[::-1]
uncertainty_index = uncertainty_index[
np.isin(uncertainty_index, unlabeled_indices)
][:5]
# keep track of indices that we get labels for
delete_indices = np.array([], dtype=int)
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
f.text(
0.05,
(1 - (i + 1) * 0.183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10),
size=10,
)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r, interpolation="none")
sub.set_title(
"predict: %i\ntrue: %i"
% (lp_model.transduction_[image_index], y[image_index]),
size=10,
)
sub.axis("off")
# labeling 5 points, remote from labeled set
(delete_index,) = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += len(uncertainty_index)
f.suptitle(
(
"Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model."
),
y=1.15,
)
plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9, wspace=0.2, hspace=0.85)
plt.show()
|
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
__all__ = ["get_bolded_text", "get_color_mapping", "get_colored_text", "print_text"]
|
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
__all__ = ["get_color_mapping", "get_colored_text", "get_bolded_text", "print_text"]
|
_base_ = './mask-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
            '--options and --cfg-options cannot both be '
            'specified; --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
            '--options and --cfg-options cannot both be '
            'specified; --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
Returns
-------
lib_path
        List of all found library paths to xgboost.
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
# normal, after installation `lib` is copied into Python package tree.
os.path.join(curr_path, "lib"),
# editable installation, no copying is performed.
os.path.join(curr_path, os.path.pardir, os.path.pardir, "lib"),
# use libxgboost from a system prefix, if available. This should be the last
# option.
os.path.join(sys.base_prefix, "lib"),
]
if sys.platform == "win32":
# On Windows, Conda may install libs in different paths
dll_path.extend(
[
os.path.join(sys.base_prefix, "bin"),
os.path.join(sys.base_prefix, "Library"),
os.path.join(sys.base_prefix, "Library", "bin"),
os.path.join(sys.base_prefix, "Library", "lib"),
os.path.join(sys.base_prefix, "Library", "mingw-w64"),
os.path.join(sys.base_prefix, "Library", "mingw-w64", "bin"),
os.path.join(sys.base_prefix, "Library", "mingw-w64", "lib"),
]
)
dll_path = [os.path.join(p, "xgboost.dll") for p in dll_path]
elif sys.platform.startswith(("linux", "freebsd", "emscripten")):
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
elif sys.platform == "darwin":
dll_path = [os.path.join(p, "libxgboost.dylib") for p in dll_path]
elif sys.platform == "cygwin":
dll_path = [os.path.join(p, "cygxgboost.dll") for p in dll_path]
if platform.system() == "OS400":
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
# XGBOOST_BUILD_DOC is defined by sphinx conf.
if not lib_path and not os.environ.get("XGBOOST_BUILD_DOC", False):
link = "https://xgboost.readthedocs.io/en/stable/install.html"
msg = (
"Cannot find XGBoost Library in the candidate path. "
+ "List of candidates:\n- "
+ ("\n- ".join(dll_path))
+ "\nXGBoost Python package path: "
+ curr_path
+ "\nsys.base_prefix: "
+ sys.base_prefix
+ "\nSee: "
+ link
+ " for installing XGBoost."
)
raise XGBoostLibraryNotFound(msg)
return lib_path
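# --- Illustrative usage sketch (an addition, not part of the upstream file) ---
# Loading the first discovered library with ctypes, roughly what the XGBoost
# core module does with the returned paths; XGBoostLibraryNotFound is raised
# above if nothing is found.
if __name__ == "__main__":
    import ctypes

    paths = find_lib_path()
    lib = ctypes.cdll.LoadLibrary(paths[0])
    print(f"Loaded XGBoost library from {paths[0]}")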
|
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
Returns
-------
lib_path
        List of all found library paths to xgboost.
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
# normal, after installation `lib` is copied into Python package tree.
os.path.join(curr_path, "lib"),
# editable installation, no copying is performed.
os.path.join(curr_path, os.path.pardir, os.path.pardir, "lib"),
# use libxgboost from a system prefix, if available. This should be the last
# option.
os.path.join(sys.base_prefix, "lib"),
]
if sys.platform == "win32":
# On Windows, Conda may install libs in different paths
dll_path.extend(
[
os.path.join(sys.base_prefix, "bin"),
os.path.join(sys.base_prefix, "Library"),
os.path.join(sys.base_prefix, "Library", "bin"),
os.path.join(sys.base_prefix, "Library", "lib"),
]
)
dll_path = [os.path.join(p, "xgboost.dll") for p in dll_path]
elif sys.platform.startswith(("linux", "freebsd", "emscripten")):
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
elif sys.platform == "darwin":
dll_path = [os.path.join(p, "libxgboost.dylib") for p in dll_path]
elif sys.platform == "cygwin":
dll_path = [os.path.join(p, "cygxgboost.dll") for p in dll_path]
if platform.system() == "OS400":
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
# XGBOOST_BUILD_DOC is defined by sphinx conf.
if not lib_path and not os.environ.get("XGBOOST_BUILD_DOC", False):
link = "https://xgboost.readthedocs.io/en/stable/install.html"
msg = (
"Cannot find XGBoost Library in the candidate path. "
+ "List of candidates:\n- "
+ ("\n- ".join(dll_path))
+ "\nXGBoost Python package path: "
+ curr_path
+ "\nsys.base_prefix: "
+ sys.base_prefix
+ "\nSee: "
+ link
+ " for installing XGBoost."
)
raise XGBoostLibraryNotFound(msg)
return lib_path
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
fp16 = dict(loss_scale=512.)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import load_librispeech_item
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
def _get_fileids_paths(path, subset, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
The format of path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
if subset == "10min":
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/0/*/*/*/*" + _ext_audio)
]
elif subset in ["1h", "10h"]:
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/*/*/*/*/*" + _ext_audio)
]
if subset == "10h":
files_paths += [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("9h/*/*/*/*" + _ext_audio)
]
else:
raise ValueError(f"Unsupported subset value. Found {subset}.")
files_paths = sorted(files_paths, key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Create a Dataset for LibriLightLimited, which is the supervised subset of
    the LibriLight dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``10min``, ``1h``, ``10h``]
(Default: ``10min``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
assert subset in ["10min", "1h", "10h"], "`subset` must be one of ['10min', '1h', '10h']"
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, subset, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
file_path, fileid = self._fileids_paths[n]
return load_librispeech_item(fileid, file_path, self._ext_audio, self._ext_txt)
def __len__(self) -> int:
return len(self._fileids_paths)
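# --- Illustrative usage sketch (an addition, not part of the upstream file) ---
# Typical use with a DataLoader; "./data" is a made-up root directory and
# download=True fetches the archive on first use.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = LibriLightLimited("./data", subset="10min", download=True)
    waveform, sample_rate, transcript, *_ = dataset[0]
    loader = DataLoader(dataset, batch_size=1, shuffle=True)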
|
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import load_librispeech_item
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
def _get_fileids_paths(path, subset, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
The format of path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
if subset == "10min":
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/0/*/*/*/*" + _ext_audio)
]
elif subset in ["1h", "10h"]:
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/*/*/*/*/*" + _ext_audio)
]
if subset == "10h":
files_paths += [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("9h/*/*/*/*" + _ext_audio)
]
else:
raise ValueError(f"Unsupported subset value. Found {subset}.")
files_paths = sorted(files_paths, key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Create a Dataset for LibriLightLimited, which is the supervised subset of
    the LibriLight dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
        subset (str, optional): The subset to use. Options: [``10min``, ``1h``, ``10h``]
(Default: ``10min``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
assert subset in ["10min", "1h", "10h"], "`subset` must be one of ['10min', '1h', '10h']"
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, subset, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
file_path, fileid = self._fileids_paths[n]
return load_librispeech_item(fileid, file_path, self._ext_audio, self._ext_txt)
def __len__(self) -> int:
return len(self._fileids_paths)
|
def __getattr__(name: str):
if name == "Streamer":
import warnings
from torchaudio.io import StreamReader
warnings.warn(
f"{__name__}.{name} has been moved to torchaudio.io.StreamReader. Please use torchaudio.io.StreamReader",
DeprecationWarning,
)
global Streamer
Streamer = StreamReader
return Streamer
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return ["Streamer"]
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"Streamer",
"SourceStream",
"SourceAudioStream",
"SourceVideoStream",
"OutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError(
"Stream API requires FFmpeg libraries (libavformat and such). Please install FFmpeg 4."
) from err
try:
torch.ops.torchaudio.ffmpeg_init()
except RuntimeError as err:
raise RuntimeError(
"Stream API requires FFmpeg binding. Please set BUILD_FFMPEG=1 when building from source."
) from err
global _INITIALIZED
_INITIALIZED = True
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not _INITIALIZED:
_init_extension()
from . import streamer
item = getattr(streamer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on='/index',
inputs=data_generator(),
request_size=request_size,
return_results=True,
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
def filter_none(elements):
return list(filter(lambda e: e is not None, elements))
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
assert (
len(
filter_none(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
)
== count
)
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_path]},
return_results=True,
)
validate_traversal(docs_per_path)(resp)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on='/index',
inputs=data_generator(),
request_size=request_size,
return_results=True,
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
def filter_none(elements):
return list(filter(lambda e: e is not None, elements))
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
assert (
len(
filter_none(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
)
== count
)
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_path]},
return_results=True,
)
validate_traversal(docs_per_path)(resp)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
import sys
from typing import Callable
import pytest
from langchain_core.runnables.base import RunnableLambda
from langchain_core.runnables.utils import (
get_function_nonlocals,
get_lambda_source,
indent_lines_after_first,
)
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
@pytest.mark.parametrize(
("func", "expected_source"),
[
(lambda x: x * 2, "lambda x: x * 2"),
(lambda a, b: a + b, "lambda a, b: a + b"),
(lambda x: x if x > 0 else 0, "lambda x: x if x > 0 else 0"), # noqa: FURB136
],
)
def test_get_lambda_source(func: Callable, expected_source: str) -> None:
"""Test get_lambda_source function."""
source = get_lambda_source(func)
assert source == expected_source
@pytest.mark.parametrize(
("text", "prefix", "expected_output"),
[
("line 1\nline 2\nline 3", "1", "line 1\n line 2\n line 3"),
("line 1\nline 2\nline 3", "ax", "line 1\n line 2\n line 3"),
],
)
def test_indent_lines_after_first(text: str, prefix: str, expected_output: str) -> None:
"""Test indent_lines_after_first function."""
indented_text = indent_lines_after_first(text, prefix)
assert indented_text == expected_output
global_agent = RunnableLambda(lambda x: x * 3)
def test_nonlocals() -> None:
agent = RunnableLambda(lambda x: x * 2)
def my_func(input: str, agent: dict[str, str]) -> str:
return agent.get("agent_name", input)
def my_func2(input: str) -> str:
return agent.get("agent_name", input) # type: ignore[attr-defined]
def my_func3(input: str) -> str:
return agent.invoke(input)
def my_func4(input: str) -> str:
return global_agent.invoke(input)
def my_func5() -> tuple[Callable[[str], str], RunnableLambda]:
global_agent = RunnableLambda(lambda x: x * 3)
def my_func6(input: str) -> str:
return global_agent.invoke(input)
return my_func6, global_agent
assert get_function_nonlocals(my_func) == []
assert get_function_nonlocals(my_func2) == []
assert get_function_nonlocals(my_func3) == [agent.invoke]
assert get_function_nonlocals(my_func4) == [global_agent.invoke]
func, nl = my_func5()
assert get_function_nonlocals(func) == [nl.invoke]
assert RunnableLambda(my_func3).deps == [agent]
assert RunnableLambda(my_func4).deps == [global_agent]
assert RunnableLambda(func).deps == [nl]
|
import sys
from typing import Callable
import pytest
from langchain_core.runnables.base import RunnableLambda
from langchain_core.runnables.utils import (
get_function_nonlocals,
get_lambda_source,
indent_lines_after_first,
)
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
@pytest.mark.parametrize(
"func, expected_source",
[
(lambda x: x * 2, "lambda x: x * 2"),
(lambda a, b: a + b, "lambda a, b: a + b"),
(lambda x: x if x > 0 else 0, "lambda x: x if x > 0 else 0"), # noqa: FURB136
],
)
def test_get_lambda_source(func: Callable, expected_source: str) -> None:
"""Test get_lambda_source function."""
source = get_lambda_source(func)
assert source == expected_source
@pytest.mark.parametrize(
"text,prefix,expected_output",
[
("line 1\nline 2\nline 3", "1", "line 1\n line 2\n line 3"),
("line 1\nline 2\nline 3", "ax", "line 1\n line 2\n line 3"),
],
)
def test_indent_lines_after_first(text: str, prefix: str, expected_output: str) -> None:
"""Test indent_lines_after_first function."""
indented_text = indent_lines_after_first(text, prefix)
assert indented_text == expected_output
global_agent = RunnableLambda(lambda x: x * 3)
def test_nonlocals() -> None:
agent = RunnableLambda(lambda x: x * 2)
def my_func(input: str, agent: dict[str, str]) -> str:
return agent.get("agent_name", input)
def my_func2(input: str) -> str:
return agent.get("agent_name", input) # type: ignore[attr-defined]
def my_func3(input: str) -> str:
return agent.invoke(input)
def my_func4(input: str) -> str:
return global_agent.invoke(input)
def my_func5() -> tuple[Callable[[str], str], RunnableLambda]:
global_agent = RunnableLambda(lambda x: x * 3)
def my_func6(input: str) -> str:
return global_agent.invoke(input)
return my_func6, global_agent
assert get_function_nonlocals(my_func) == []
assert get_function_nonlocals(my_func2) == []
assert get_function_nonlocals(my_func3) == [agent.invoke]
assert get_function_nonlocals(my_func4) == [global_agent.invoke]
func, nl = my_func5()
assert get_function_nonlocals(func) == [nl.invoke]
assert RunnableLambda(my_func3).deps == [agent]
assert RunnableLambda(my_func4).deps == [global_agent]
assert RunnableLambda(func).deps == [nl]
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_community.chat_models.litellm import ChatLiteLLM
class TestLiteLLMStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatLiteLLM
@property
def chat_model_params(self) -> dict:
return {"model": "ollama/mistral"}
@pytest.mark.xfail(reason="Not yet implemented.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_community.chat_models.litellm import ChatLiteLLM
class TestLiteLLMStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatLiteLLM
@property
def chat_model_params(self) -> dict:
return {"model": "ollama/mistral"}
@pytest.mark.xfail(reason="Not yet implemented.")
def test_usage_metadata(self, model: BaseChatModel) -> None:
super().test_usage_metadata(model)
|
import logging
import platform
import warnings
from typing import Any, List, Optional, Type, Union
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, model_validator
logger = logging.getLogger(__name__)
class ShellInput(BaseModel):
"""Commands for the Bash Shell tool."""
commands: Union[str, List[str]] = Field(
...,
description="List of shell commands to run. Deserialized using json.loads",
)
"""List of shell commands to run."""
@model_validator(mode="before")
@classmethod
def _validate_commands(cls, values: dict) -> Any:
"""Validate commands."""
# TODO: Add real validators
commands = values.get("commands")
if not isinstance(commands, list):
values["commands"] = [commands]
# Warn that the bash tool is not safe
warnings.warn(
"The shell tool has no safeguards by default. Use at your own risk."
)
return values
def _get_default_bash_process() -> Any:
"""Get default bash process."""
try:
from langchain_experimental.llm_bash.bash import BashProcess
except ImportError:
raise ImportError(
"BashProcess has been moved to langchain experimental."
"To use this tool, install langchain-experimental "
"with `pip install langchain-experimental`."
)
return BashProcess(return_err_output=True)
def _get_platform() -> str:
"""Get platform."""
system = platform.system()
if system == "Darwin":
return "MacOS"
return system
class ShellTool(BaseTool):
"""Tool to run shell commands."""
process: Any = Field(default_factory=_get_default_bash_process)
"""Bash process to run commands."""
name: str = "terminal"
"""Name of tool."""
description: str = f"Run shell commands on this {_get_platform()} machine."
"""Description of tool."""
args_schema: Type[BaseModel] = ShellInput
"""Schema for input arguments."""
ask_human_input: bool = False
"""
If True, prompts the user for confirmation (y/n) before executing
a command generated by the language model in the bash shell.
"""
def _run(
self,
commands: Union[str, List[str]],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Run commands and return final output."""
print(f"Executing command:\n {commands}") # noqa: T201
try:
if self.ask_human_input:
user_input = input("Proceed with command execution? (y/n): ").lower()
if user_input == "y":
return self.process.run(commands)
else:
logger.info("Invalid input. User aborted command execution.")
return None # type: ignore[return-value]
else:
return self.process.run(commands)
except Exception as e:
logger.error(f"Error during command execution: {e}")
return None # type: ignore[return-value]
|
import logging
import platform
import warnings
from typing import Any, List, Optional, Type, Union
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, model_validator
logger = logging.getLogger(__name__)
class ShellInput(BaseModel):
"""Commands for the Bash Shell tool."""
commands: Union[str, List[str]] = Field(
...,
description="List of shell commands to run. Deserialized using json.loads",
)
"""List of shell commands to run."""
@model_validator(mode="before")
@classmethod
def _validate_commands(cls, values: dict) -> Any:
"""Validate commands."""
# TODO: Add real validators
commands = values.get("commands")
if not isinstance(commands, list):
values["commands"] = [commands]
# Warn that the bash tool is not safe
warnings.warn(
"The shell tool has no safeguards by default. Use at your own risk."
)
return values
def _get_default_bash_process() -> Any:
"""Get default bash process."""
try:
from langchain_experimental.llm_bash.bash import BashProcess
except ImportError:
raise ImportError(
"BashProcess has been moved to langchain experimental."
"To use this tool, install langchain-experimental "
"with `pip install langchain-experimental`."
)
return BashProcess(return_err_output=True)
def _get_platform() -> str:
"""Get platform."""
system = platform.system()
if system == "Darwin":
return "MacOS"
return system
class ShellTool(BaseTool): # type: ignore[override, override]
"""Tool to run shell commands."""
process: Any = Field(default_factory=_get_default_bash_process)
"""Bash process to run commands."""
name: str = "terminal"
"""Name of tool."""
description: str = f"Run shell commands on this {_get_platform()} machine."
"""Description of tool."""
args_schema: Type[BaseModel] = ShellInput
"""Schema for input arguments."""
ask_human_input: bool = False
"""
If True, prompts the user for confirmation (y/n) before executing
a command generated by the language model in the bash shell.
"""
def _run(
self,
commands: Union[str, List[str]],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Run commands and return final output."""
print(f"Executing command:\n {commands}") # noqa: T201
try:
if self.ask_human_input:
user_input = input("Proceed with command execution? (y/n): ").lower()
if user_input == "y":
return self.process.run(commands)
else:
logger.info("Invalid input. User aborted command execution.")
return None # type: ignore[return-value]
else:
return self.process.run(commands)
except Exception as e:
logger.error(f"Error during command execution: {e}")
return None # type: ignore[return-value]
|
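The `_validate_commands` hook above is a standard pydantic v2 "before" validator: coerce a bare string into a one-element list before field validation runs. A self-contained sketch of just that pattern (the `CommandsInput` name is hypothetical):

from typing import Any, List, Union

from pydantic import BaseModel, model_validator

class CommandsInput(BaseModel):
    commands: Union[str, List[str]]

    @model_validator(mode="before")
    @classmethod
    def _coerce(cls, values: dict) -> Any:
        # wrap a single command string into a list, as ShellInput does
        if not isinstance(values.get("commands"), list):
            values["commands"] = [values.get("commands")]
        return values

print(CommandsInput(commands="echo hi").commands)  # ['echo hi']
|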
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
from jinahub.indexers.searcher.FaissSearcher.faiss_searcher import FaissSearcher
from jinahub.indexers.storage.PostgreSQLStorage import PostgreSQLStorage
class FaissPostgresSearcher(Executor):
"""A Compound Indexer made up of a FaissSearcher (for vectors) and a Postgres Indexer"""
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
# when constructed from rolling update the dump_path is passed via a runtime_arg
dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self._kv_indexer = None
self._vec_indexer = None
if dump_path:
self._vec_indexer = FaissSearcher(dump_path=dump_path, **kwargs)
self._kv_indexer = PostgreSQLStorage(**kwargs)
else:
self.logger.warning(
f'No dump path provided for {self}. Use .rolling_update() to re-initialize...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if self._kv_indexer and self._vec_indexer:
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters['traversal_paths'] = [
path + 'm' for path in kv_parameters.get('traversal_paths', ['r'])
]
self._kv_indexer.search(docs, kv_parameters)
else:
return
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
from jinahub.indexers.searcher.FaissSearcher.faiss_searcher import FaissSearcher
from jinahub.indexers.storage.PostgreSQLStorage.postgres import PostgreSQLStorage
class FaissPostgresSearcher(Executor):
"""A Compound Indexer made up of a FaissSearcher (for vectors) and a Postgres Indexer"""
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
# when constructed from rolling update the dump_path is passed via a runtime_arg
dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self._kv_indexer = None
self._vec_indexer = None
if dump_path:
self._vec_indexer = FaissSearcher(dump_path=dump_path, **kwargs)
self._kv_indexer = PostgreSQLStorage(**kwargs)
else:
self.logger.warning(
f'No dump path provided for {self}. Use .rolling_update() to re-initialize...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if self._kv_indexer and self._vec_indexer:
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters['traversal_paths'] = [
path + 'm' for path in kv_parameters.get('traversal_paths', ['r'])
]
self._kv_indexer.search(docs, kv_parameters)
else:
return
|
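The only non-obvious step in `search` above is the traversal-path rewrite: the key-value lookup must run on the matches (`'m'` suffix) produced by the vector search, not on the query roots. In isolation:

import copy

parameters = {"traversal_paths": ["r"], "top_k": 5}
kv_parameters = copy.deepcopy(parameters)  # leave the caller's dict untouched
kv_parameters["traversal_paths"] = [
    path + "m" for path in kv_parameters.get("traversal_paths", ["r"])
]
print(kv_parameters["traversal_paths"])  # ['rm']
|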
import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
from jina.orchestrate.deployments import Deployment
from jina.serve.runtimes.gateway.gateway import BaseGateway
def _get_all_parser(cls: Type['JAMLCompatible']):
"""Get all parsers and legacy parser of a class
:param cls: target class
:return: a tuple of two elements; first is a list of all parsers, second is the legacy parser for default fallback
"""
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
if issubclass(cls, Flow):
return _get_flow_parser()
elif issubclass(cls, BaseExecutor):
return _get_exec_parser()
elif issubclass(cls, BaseGateway):
return _get_gateway_parser()
elif issubclass(cls, Deployment):
return _get_deployment_parser()
else:
raise NotImplementedError(f'No parser exists for cls {cls.__name__}')
def _get_flow_parser():
from jina.jaml.parsers.flow.v1 import V1Parser
return [V1Parser], V1Parser
def _get_exec_parser():
from jina.jaml.parsers.executor.legacy import ExecutorLegacyParser
return [ExecutorLegacyParser], ExecutorLegacyParser
def _get_deployment_parser():
from jina.jaml.parsers.deployment.legacy import DeploymentLegacyParser
return [DeploymentLegacyParser], DeploymentLegacyParser
def _get_gateway_parser():
from jina.jaml.parsers.gateway.legacy import GatewayLegacyParser
return [GatewayLegacyParser], GatewayLegacyParser
def get_parser(
cls: Type['JAMLCompatible'], version: Optional[str]
) -> 'VersionedYAMLParser':
"""
.. # noqa: DAR401
:param cls: the target class to parse
:param version: yaml version number in "MAJOR[.MINOR]" format
:return: parser given the YAML version
"""
all_parsers, legacy_parser = _get_all_parser(cls)
if version:
if isinstance(version, (float, int)):
version = str(version)
for p in all_parsers:
if p.version == version:
return p()
for p in all_parsers:
# fallback to major
if version.split('.')[0] == p.version:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to parser for version: {p.version}',
UserWarning,
)
return p()
raise BadYAMLVersion(f'{version} is not a valid version number')
else:
if version is not None:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to legacy parser. '
f'this usually means you are using a deprecated YAML format.',
DeprecationWarning,
)
# fallback to legacy parser
return legacy_parser()
def get_supported_versions(cls) -> List[str]:
"""List all supported versions
:param cls: the class to check
:return: supported versions sorted alphabetically
"""
all_parsers, _ = _get_all_parser(cls)
return list(sorted(p.version for p in all_parsers))
|
import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
from jina.orchestrate.deployments import Deployment
from jina.serve.gateway import BaseGateway
def _get_all_parser(cls: Type['JAMLCompatible']):
"""Get all parsers and legacy parser of a class
:param cls: target class
:return: a tuple of two elements; first is a list of all parsers, second is the legacy parser for default fallback
"""
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
if issubclass(cls, Flow):
return _get_flow_parser()
elif issubclass(cls, BaseExecutor):
return _get_exec_parser()
elif issubclass(cls, BaseGateway):
return _get_gateway_parser()
elif issubclass(cls, Deployment):
return _get_deployment_parser()
else:
raise NotImplementedError(f'No parser exists for cls {cls.__name__}')
def _get_flow_parser():
from jina.jaml.parsers.flow.v1 import V1Parser
return [V1Parser], V1Parser
def _get_exec_parser():
from jina.jaml.parsers.executor.legacy import ExecutorLegacyParser
return [ExecutorLegacyParser], ExecutorLegacyParser
def _get_deployment_parser():
from jina.jaml.parsers.deployment.legacy import DeploymentLegacyParser
return [DeploymentLegacyParser], DeploymentLegacyParser
def _get_gateway_parser():
from jina.jaml.parsers.gateway.legacy import GatewayLegacyParser
return [GatewayLegacyParser], GatewayLegacyParser
def get_parser(
cls: Type['JAMLCompatible'], version: Optional[str]
) -> 'VersionedYAMLParser':
"""
.. # noqa: DAR401
:param cls: the target class to parse
:param version: yaml version number in "MAJOR[.MINOR]" format
:return: parser given the YAML version
"""
all_parsers, legacy_parser = _get_all_parser(cls)
if version:
if isinstance(version, (float, int)):
version = str(version)
for p in all_parsers:
if p.version == version:
return p()
for p in all_parsers:
# fallback to major
if version.split('.')[0] == p.version:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to parser for version: {p.version}',
UserWarning,
)
return p()
raise BadYAMLVersion(f'{version} is not a valid version number')
else:
if version is not None:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to legacy parser. '
f'this usually means you are using a deprecated YAML format.',
DeprecationWarning,
)
# fallback to legacy parser
return legacy_parser()
def get_supported_versions(cls) -> List[str]:
"""List all supported versions
:param cls: the class to check
:return: supported versions sorted alphabetically
"""
all_parsers, _ = _get_all_parser(cls)
return list(sorted(p.version for p in all_parsers))
|
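The selection logic in `get_parser` is worth isolating: exact version match first, then fall back to the major version with a warning, then fail. A toy sketch with a stand-in parser class (not jina's classes):

import warnings

class V1Parser:
    version = '1'

def pick_parser(parsers, version: str):
    for p in parsers:
        if p.version == version:
            return p()  # exact match
    for p in parsers:
        if version.split('.')[0] == p.version:  # fall back to major version
            warnings.warn(f'no parser for {version}, falling back to {p.version}')
            return p()
    raise ValueError(f'{version} is not a valid version number')

print(type(pick_parser([V1Parser], '1.1')).__name__)  # V1Parser, with a warning
|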
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
# use ResNeSt img_norm
img_norm_cfg = dict(
mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
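Both config styles encode the same ResNeSt preprocessing; only its location differs (the model's `data_preprocessor` in the new style vs. a `Normalize` pipeline step in the old one). The arithmetic itself is a per-channel shift and scale:

import numpy as np

mean = np.array([123.68, 116.779, 103.939])
std = np.array([58.393, 57.12, 57.375])
pixel = np.array([255.0, 0.0, 128.0])  # an example RGB value
print((pixel - mean) / std)  # per-channel normalized values
|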
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataSample
from .base import BaseEvaluator
class ComposedEvaluator:
"""Wrapper class to compose multiple :class:`DatasetEvaluator` instances.
Args:
evaluators (Sequence[BaseEvaluator]): The evaluators to compose.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
"""
def __init__(self,
evaluators: Sequence[BaseEvaluator],
collect_device='cpu'):
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.evaluators = evaluators
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for evaluator in self.evaluators:
evaluator.dataset_meta = dataset_meta
def process(self, data_samples: BaseDataSample, predictions: dict):
"""Invoke process method of each wrapped evaluator.
Args:
data_samples (BaseDataSample): The data samples from the dataset.
predictions (dict): The output of the model.
"""
for evaluator in self.evaluators:
evaluator.process(data_samples, predictions)
def evaluate(self, size: int) -> dict:
"""Invoke evaluate method of each wrapped evaluator and collect the
metrics dict.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics of all wrapped evaluators. The keys are
the names of the metrics, and the values are corresponding results.
"""
metrics = {}
for evaluator in self.evaluators:
_metrics = evaluator.evaluate(size)
# Check metric name conflicts
for name in _metrics.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluators with the same metric '
f'name {name}')
metrics.update(_metrics)
return metrics
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from .base import BaseEvaluator
class ComposedEvaluator:
"""Wrapper class to compose multiple :class:`DatasetEvaluator` instances.
Args:
evaluators (Sequence[BaseEvaluator]): The evaluators to compose.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
"""
def __init__(self,
evaluators: Sequence[BaseEvaluator],
collect_device='cpu'):
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.evaluators = evaluators
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for evaluator in self.evaluators:
evaluator.dataset_meta = dataset_meta
def process(self, data_samples: dict, predictions: dict):
"""Invoke process method of each wrapped evaluator.
Args:
data_samples (dict): The data samples from the dataset.
predictions (dict): The output of the model.
"""
for evaluator in self.evaluators:
evaluator.process(data_samples, predictions)
def evaluate(self, size: int) -> dict:
"""Invoke evaluate method of each wrapped evaluator and collect the
metrics dict.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
metrics (dict): Evaluation metrics of all wrapped evaluators. The
keys are the names of the metrics, and the values are
corresponding results.
"""
metrics = {}
for evaluator in self.evaluators:
_metrics = evaluator.evaluate(size)
# Check metric name conflicts
for name in _metrics.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluators with the same metric '
f'name {name}')
metrics.update(_metrics)
return metrics
|
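The one reusable piece of logic in `evaluate` is the metric-name conflict check performed while merging results. With plain dicts standing in for evaluator outputs:

def combine(metric_dicts):
    metrics = {}
    for _metrics in metric_dicts:
        for name in _metrics:
            if name in metrics:
                raise ValueError(
                    f'There are multiple evaluators with the same metric name {name}')
        metrics.update(_metrics)
    return metrics

print(combine([{'mAP': 0.5}, {'accuracy': 0.9}]))  # merges cleanly
# combine([{'mAP': 0.5}, {'mAP': 0.6}]) would raise ValueError
|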
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes,
num_ins,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super(RetinaSepBNHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
super(RetinaSepBNHead, self).init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = feats[i]
reg_feat = feats[i]
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes,
num_ins,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super(RetinaSepBNHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
super(RetinaSepBNHead, self).init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = feats[i]
reg_feat = feats[i]
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
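The `_init_layers` trick above, re-pointing `self.cls_convs[j][i].conv` at level 0's conv, means every FPN level trains one shared set of conv weights while keeping its own BN statistics. A minimal torch sketch of the same aliasing:

import torch.nn as nn

convs = nn.ModuleList([nn.Conv2d(6, 6, 3, padding=1) for _ in range(2)])
bns = nn.ModuleList([nn.BatchNorm2d(6) for _ in range(2)])
convs[1] = convs[0]  # share conv weights across levels; BN layers stay separate
print(convs[0].weight is convs[1].weight)          # True: one parameter tensor
print(bns[0].running_mean is bns[1].running_mean)  # False: per-level statistics
|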
_base_ = './tood_r101_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
bbox_head=dict(num_dcn=2))
|
_base_ = './tood_r101_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
bbox_head=dict(num_dcn=2))
|
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables),
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
if not hasattr(llm, "bind_tools"):
msg = "This function requires a bind_tools() method be implemented on the LLM."
raise ValueError(
msg,
)
llm_with_tools = llm.bind_tools(tools)
return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"]),
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
|
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
if not hasattr(llm, "bind_tools"):
msg = "This function requires a bind_tools() method be implemented on the LLM."
raise ValueError(
msg,
)
llm_with_tools = llm.bind_tools(tools)
return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
|
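The validation at the top of `create_tool_calling_agent` is a plain set difference over the prompt's declared variables. In isolation:

input_variables = ["input", "chat_history"]  # what the prompt declares
partial_variables = {}                       # pre-filled values, if any
missing_vars = {"agent_scratchpad"}.difference(
    input_variables + list(partial_variables)
)
print(missing_vars)  # {'agent_scratchpad'} -> the function raises ValueError
|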
import logging
import sentry_sdk
from backend.util.settings import Settings
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={
"enable_logs": True,
},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
|
import sentry_sdk
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(dsn=sentry_dsn, traces_sample_rate=1.0, profiles_sample_rate=1.0)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAngleLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
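The loss formula quoted in the docstring, ``log(1 + sum exp(s(k,l) - s(i,j)))`` over pairs where ``(i,j)`` should score higher than ``(k,l)``, fits in a few lines of torch. This is a schematic re-implementation for intuition only, not the sentence-transformers code; the similarity scores are assumed precomputed:

import torch

def cosent_style_loss(scores: torch.Tensor, labels: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
    scores = scores * scale
    # diffs[i, j] = s(j) - s(i) for every ordered pair of input pairs
    diffs = scores[None, :] - scores[:, None]
    # keep only pairs where pair i is expected to be more similar than pair j
    diffs = diffs[labels[:, None] > labels[None, :]]
    # log(1 + sum exp(...)) via logsumexp with an extra zero term
    return torch.logsumexp(torch.cat([torch.zeros(1), diffs]), dim=0)

scores = torch.tensor([0.9, 0.2])  # model similarities for two sentence pairs
labels = torch.tensor([1.0, 0.3])  # gold similarity scores
print(cosent_style_loss(scores, labels))
|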
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
@property
def torch_params(self):
if not hasattr(self, "_torch_params"):
self._track_variables()
return self._torch_params
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
# set torch_params attribute will have module automatically track
# parameters.
self._torch_params = torch.nn.ParameterDict(
{variable.path: variable.value for variable in self.variables}
)
def named_parameters(
self,
prefix: str = "",
recurse: bool = True,
remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
if not hasattr(self, "_torch_params"):
self._track_variables()
return torch.nn.Module.named_parameters(
self, prefix, recurse, remove_duplicate
)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "_torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "_torch_params"):
if variable.path not in self.torch_params:
self.torch_params[variable.path] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "_torch_params"):
if variable.path in self.torch_params:
self.torch_params.pop(variable.path)
|
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
# set torch_params attribute will have module automatically track
# parameters.
self.torch_params = torch.nn.ParameterDict(
{variable.path: variable.value for variable in self.variables}
)
def named_parameters(
self,
prefix: str = "",
recurse: bool = True,
remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
if not hasattr(self, "torch_params"):
self._track_variables()
return torch.nn.Module.named_parameters(
self, prefix, recurse, remove_duplicate
)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "torch_params"):
if variable.path not in self.torch_params:
self.torch_params[variable.path] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "torch_params"):
if variable.path in self.torch_params:
self.torch_params.pop(variable.path)
|
__version__ = '0.31.2'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.31.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
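The module-level `__getattr__` above is the PEP 562 hook: it fires only for names the module itself does not define, which is what lets docarray turn removed names into actionable ImportErrors. A minimal sketch, with a hypothetical package name:

# mypkg/__init__.py
def __getattr__(name: str):
    if name == 'Document':
        raise ImportError(
            "'Document' was removed; downgrade with `pip install -U docarray==0.21.0`."
        )
    raise AttributeError(f"module 'mypkg' has no attribute {name!r}")
|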
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
def build(self, input_shape):
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
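The computation in `call` is easy to verify numerically: after dividing by the (clamped) L2 norm, every row has unit norm. In numpy:

import numpy as np

x = np.arange(6, dtype=np.float64).reshape(2, 3)
square_sum = np.sum(np.square(x), axis=-1, keepdims=True)
x_normalized = x / np.sqrt(np.maximum(square_sum, 1e-12))  # rsqrt-style clamp
print(np.sum(x_normalized ** 2, axis=-1))  # [1. 1.]
|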
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from torch import nn
from mmengine.model.efficient_conv_bn_eval import \
turn_on_efficient_conv_bn_eval_for_single_model
from mmengine.testing import assert_allclose
from mmengine.utils import is_installed
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
mmcv_is_installed = is_installed('mmcv')
class BackboneModel(nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if mmcv_is_installed:
from mmcv.cnn import ConvModule
conv0 = nn.Conv2d(6, 6, 6)
bn0 = nn.BatchNorm2d(6)
self.mod1 = ConvModule.create_from_conv_bn(conv0, bn0)
self.conv1 = nn.Conv2d(6, 6, 6)
self.bn1 = nn.BatchNorm2d(6)
self.conv2 = nn.Conv2d(6, 6, 6)
self.bn2 = nn.BatchNorm2d(6)
self.conv3 = nn.Conv2d(6, 6, 6)
self.bn3 = nn.BatchNorm2d(6)
def forward(self, x):
if mmcv_is_installed:
# this ConvModule can use efficient_conv_bn_eval feature
x = self.mod1(x)
# this conv-bn pair can use efficient_conv_bn_eval feature
x = self.bn1(self.conv1(x))
# this conv-bn pair can use efficient_conv_bn_eval feature
# only for the second `self.conv2` call.
x = self.bn2(self.conv2(self.conv2(x)))
# this conv-bn pair can use efficient_conv_bn_eval feature
# just for the first forward of the `self.bn3`
x = self.bn3(self.bn3(self.conv3(x)))
return x
@unittest.skipIf(
digit_version(TORCH_VERSION) < digit_version('1.8'),
reason='torch.fx needs Pytorch 1.8 or higher')
class TestEfficientConvBNEval(TestCase):
"""Test the turn_on_efficient_conv_bn_eval function."""
def test_efficient_conv_bn_eval(self):
model = BackboneModel()
model.eval()
input = torch.randn(64, 6, 32, 32)
output = model(input)
turn_on_efficient_conv_bn_eval_for_single_model(model)
output2 = model(input)
print((output - output2).abs().max().item())
assert_allclose(output, output2)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from torch import nn
from mmengine.model.efficient_conv_bn_eval import \
turn_on_efficient_conv_bn_eval_for_single_model
from mmengine.testing import assert_allclose
from mmengine.utils import is_installed
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
mmcv_is_installed = is_installed('mmcv')
class BackboneModel(nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if mmcv_is_installed:
from mmcv.cnn import ConvModule
conv0 = nn.Conv2d(6, 6, 6)
bn0 = nn.BatchNorm2d(6)
self.mod1 = ConvModule.create_from_conv_bn(conv0, bn0)
self.conv1 = nn.Conv2d(6, 6, 6)
self.bn1 = nn.BatchNorm2d(6)
self.conv2 = nn.Conv2d(6, 6, 6)
self.bn2 = nn.BatchNorm2d(6)
self.conv3 = nn.Conv2d(6, 6, 6)
self.bn3 = nn.BatchNorm2d(6)
def forward(self, x):
if mmcv_is_installed:
# this ConvModule can use efficient_conv_bn_eval feature
x = self.mod1(x)
# this conv-bn pair can use efficient_conv_bn_eval feature
x = self.bn1(self.conv1(x))
# this conv-bn pair cannot use efficient_conv_bn_eval feature
# because `self.conv2` is used twice
x = self.bn2(self.conv2(self.conv2(x)))
# this conv-bn pair can use efficient_conv_bn_eval feature
# just for the first forward of the `self.bn3`
x = self.bn3(self.bn3(self.conv3(x)))
return x
@unittest.skipIf(
digit_version(TORCH_VERSION) < digit_version('1.8'),
reason='torch.fx needs Pytorch 1.8 or higher')
class TestEfficientConvBNEval(TestCase):
"""Test the turn_on_efficient_conv_bn_eval function."""
def test_efficient_conv_bn_eval(self):
model = BackboneModel()
model.eval()
input = torch.randn(64, 6, 32, 32)
output = model(input)
turn_on_efficient_conv_bn_eval_for_single_model(model)
output2 = model(input)
print((output - output2).abs().max().item())
assert_allclose(output, output2)
|
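The identity this test exercises is that, in eval mode, conv followed by BN folds into one conv with rescaled weight and bias. An independent torch check of that identity (not mmengine's implementation, which rewrites the graph via torch.fx):

import torch
from torch import nn

conv = nn.Conv2d(6, 6, 3)
bn = nn.BatchNorm2d(6).eval()
bn.running_mean.uniform_(-1, 1)   # pretend BN has seen data
bn.running_var.uniform_(0.5, 1.5)

with torch.no_grad():
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused = nn.Conv2d(6, 6, 3)
    fused.weight.copy_(conv.weight * scale[:, None, None, None])
    fused.bias.copy_((conv.bias - bn.running_mean) * scale + bn.bias)
    x = torch.randn(2, 6, 8, 8)
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True
|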