input | output |
---|---|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# For extras usage, see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'milvus': [
'pymilvus>=2.1.0',
],
'benchmark': [
'pandas',
'matplotlib',
'seaborn',
'h5py',
],
'test': [
'protobuf>=3.13.0,<=3.20.0',  # upper bound required by paddle; pip's dependency resolver does not enforce it
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'pymilvus>=2.1.0',
'jina',
'pytest-mock',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# For extras usage, see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'matplotlib',
'seaborn',
'h5py',
],
'test': [
'protobuf>=3.13.0,<=3.20.0',  # upper bound required by paddle; pip's dependency resolver does not enforce it
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
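A note on the idiom both setup.py variants above share: the package version is read by scanning `__init__.py` for its `__version__` line and `exec`-ing just that line. Below is a minimal standalone sketch of the pattern; the `mypkg` name and the `'0.0.0'` fallback are illustrative assumptions mirroring the files above, not DocArray specifics.

from os import path

def read_version(pkg_name: str) -> str:
    # Scan <pkg_name>/__init__.py for a line like: __version__ = '1.2.3'
    libinfo_py = path.join(pkg_name, '__init__.py')
    try:
        with open(libinfo_py, encoding='utf8') as fp:
            for line in fp:
                if line.startswith('__version__'):
                    scope = {}
                    exec(line.strip(), scope)  # defines __version__ inside `scope`
                    return scope['__version__']
    except FileNotFoundError:
        pass
    return '0.0.0'  # fallback mirroring the except branch above

print(read_version('mypkg'))  # hypothetical package directory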
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(PointRend, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(PointRend, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
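The only change between the two PointRend files above is the registry the detector is registered with: `MODELS` from `mmdet.registry` in mmdet 3.x versus the legacy `DETECTORS` builder in 2.x. Here is a minimal sketch of how such a decorator-based registry works in general; it is an illustration of the pattern, not the actual mmengine implementation.

class Registry:
    """Toy string-to-class registry with a decorator-style register hook."""

    def __init__(self, name: str):
        self.name = name
        self._module_dict = {}

    def register_module(self):
        def decorator(cls):
            self._module_dict[cls.__name__] = cls  # key modules by class name
            return cls
        return decorator

    def get(self, name: str):
        return self._module_dict[name]

MODELS = Registry('models')

@MODELS.register_module()
class PointRend:  # stand-in for the detector above
    pass

assert MODELS.get('PointRend') is PointRend  # configs can now refer to it by name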
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Pipeline,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Pipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer,
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
def test_deprecation_raises_warning(self):
with self.assertWarns(FutureWarning) as warning:
_ = Lumina2Text2ImgPipeline(**self.get_dummy_components()).to(torch_device)
warning_message = str(warning.warnings[0].message)
assert "renamed to `Lumina2Pipeline`" in warning_message
|
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Text2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer,
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
|
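The newer test file above adds `test_deprecation_raises_warning`, which pins down the rename: constructing `Lumina2Text2ImgPipeline` must emit a `FutureWarning` that mentions `Lumina2Pipeline`. A minimal sketch of that rename-with-deprecation pattern, using stand-in classes rather than the real diffusers pipelines:

import warnings

class Lumina2Pipeline:  # stand-in for the renamed pipeline
    def __init__(self, **components):
        self.components = components

class Lumina2Text2ImgPipeline(Lumina2Pipeline):
    """Old name kept as a thin alias that warns on construction."""

    def __init__(self, **components):
        warnings.warn(
            "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` "
            "and will be removed in a future version.",
            FutureWarning,
        )
        super().__init__(**components)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    Lumina2Text2ImgPipeline()
assert "renamed to `Lumina2Pipeline`" in str(caught[0].message)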
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except ImportError:
from ...spacy_text_encoder import SpacyTextEncoder
def test_spacy_text_encoder():
docs = DocumentArray([Document(text='Han likes eating pizza'), Document(text='Han likes pizza'),
Document(text='Jina rocks')])
f = Flow().add(uses=SpacyTextEncoder)
with f:
resp = f.post(on='/test', inputs=docs, return_results=True)
docs = resp[0].docs
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except ImportError:
from jinahub.encoder.spacy_text_encoder import SpacyTextEncoder
def test_spacy_text_encoder():
docs = DocumentArray([Document(text='Han likes eating pizza'), Document(text='Han likes pizza'),
Document(text='Jina rocks')])
f = Flow().add(uses=SpacyTextEncoder)
with f:
resp = f.post(on='/test', inputs=docs, return_results=True)
docs = resp[0].docs
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96,)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetMaskHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetMaskHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import testing
class MixUpTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.MixUp,
init_kwargs={
"alpha": 0.2,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
# StatelessRandomGammaV3 is not supported on XLA_GPU_JIT
run_training_check=not testing.tensorflow_uses_gpu(),
)
def test_mix_up_basic_functionality(self):
image = np.random.random((64, 64, 3))
mix_up_layer = layers.MixUp(alpha=1)
transformation = {"mix_weight": 1, "permutation_order": [0]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)[0]
self.assertAllClose(output, image)
image = np.random.random((4, 64, 64, 3))
mix_up_layer = layers.MixUp(alpha=0.2)
transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)
self.assertNotAllClose(output, image)
self.assertAllClose(output.shape, image.shape)
def test_mix_up_basic_functionality_channel_first(self):
image = np.random.random((3, 64, 64))
mix_up_layer = layers.MixUp(alpha=1)
transformation = {"mix_weight": 1, "permutation_order": [0]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)[0]
self.assertAllClose(output, image)
image = np.random.random((4, 3, 64, 64))
mix_up_layer = layers.MixUp(alpha=0.2)
transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)
self.assertNotAllClose(output, image)
self.assertAllClose(output.shape, image.shape)
def test_tf_data_compatibility(self):
layer = layers.MixUp()
input_data = np.random.random((2, 8, 8, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import testing
class MixUpTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.MixUp,
init_kwargs={
"alpha": 0.2,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_mix_up_basic_functionality(self):
image = np.random.random((64, 64, 3))
mix_up_layer = layers.MixUp(alpha=1)
transformation = {"mix_weight": 1, "permutation_order": [0]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)[0]
self.assertAllClose(output, image)
image = np.random.random((4, 64, 64, 3))
mix_up_layer = layers.MixUp(alpha=0.2)
transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)
self.assertNotAllClose(output, image)
self.assertAllClose(output.shape, image.shape)
def test_mix_up_basic_functionality_channel_first(self):
image = np.random.random((3, 64, 64))
mix_up_layer = layers.MixUp(alpha=1)
transformation = {"mix_weight": 1, "permutation_order": [0]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)[0]
self.assertAllClose(output, image)
image = np.random.random((4, 3, 64, 64))
mix_up_layer = layers.MixUp(alpha=0.2)
transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)
self.assertNotAllClose(output, image)
self.assertAllClose(output.shape, image.shape)
def test_tf_data_compatibility(self):
layer = layers.MixUp()
input_data = np.random.random((2, 8, 8, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
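For orientation on what both MixUp test files verify: the layer blends each image with a batch mate chosen by `permutation_order`, weighted by `mix_weight`, so a weight of 1 returns the input unchanged, which is exactly the `assertAllClose` case above. A NumPy sketch under that reading of the transformation dict; the real Keras layer may differ in details:

import numpy as np

def mixup_images(images, mix_weight, permutation_order):
    images = np.asarray(images, dtype="float32")
    permuted = images[np.asarray(permutation_order)]  # reorder along the batch axis
    return mix_weight * images + (1.0 - mix_weight) * permuted

batch = np.random.random((4, 8, 8, 3)).astype("float32")
# mix_weight == 1 keeps the original images regardless of the permutation
assert np.allclose(mixup_images(batch, 1.0, [1, 0, 2, 3]), batch)
# a fractional weight produces a genuine blend
blended = mixup_images(batch, 0.2, [1, 0, 2, 3])
assert blended.shape == batch.shape and not np.allclose(blended, batch)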
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[Tuple[Any, BaseDataElement]]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
inputs, data_samples = data_batch # type: ignore
inputs = tensor2imgs(inputs,
**data_samples[0].get('img_norm_cfg', dict()))
for input, data_sample, output in zip(
inputs,
data_samples, # type: ignore
outputs): # type: ignore
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataSample
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[Tuple[Any, BaseDataSample]]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataSample], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
inputs, data_samples = data_batch # type: ignore
inputs = tensor2imgs(inputs,
**data_samples[0].get('img_norm_cfg', dict()))
for input, data_sample, output in zip(
inputs,
data_samples, # type: ignore
outputs): # type: ignore
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
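The unpad-then-resize step in both hook versions is easy to get wrong because `unpad_shape` arrives as (width, height) while NumPy arrays index as (height, width), and `cv2.resize` takes (width, height) again. A standalone sketch of just that step, assuming `opencv-python` is installed:

import cv2
import numpy as np

def unpad_and_restore(img: np.ndarray, unpad_shape, ori_shape) -> np.ndarray:
    unpad_width, unpad_height = unpad_shape   # (w, h), matching _unpad above
    cropped = img[:unpad_height, :unpad_width]
    return cv2.resize(cropped, ori_shape)     # cv2.resize expects (w, h)

padded = np.zeros((320, 320, 3), dtype=np.uint8)
restored = unpad_and_restore(padded, (300, 200), (600, 400))
assert restored.shape == (400, 600, 3)        # arrays come back as (h, w, channels)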
"""Utilities for getting information about the runtime environment."""
import platform
from functools import lru_cache
@lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
"""Get information about the LangChain runtime environment.
Returns:
A dictionary with information about the runtime environment.
"""
# Lazy import to avoid circular imports
from langchain_core import __version__
return {
"library_version": __version__,
"library": "langchain-core",
"platform": platform.platform(),
"runtime": "python",
"runtime_version": platform.python_version(),
}
|
import platform
from functools import lru_cache
@lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
"""Get information about the LangChain runtime environment.
Returns:
A dictionary with information about the runtime environment.
"""
# Lazy import to avoid circular imports
from langchain_core import __version__
return {
"library_version": __version__,
"library": "langchain-core",
"platform": platform.platform(),
"runtime": "python",
"runtime_version": platform.python_version(),
}
|
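The only difference between the two versions above is the module docstring; both rely on `@lru_cache(maxsize=1)` so the platform lookup runs once per process. A generic sketch of that idiom with a hypothetical function; note that the cached dict is shared, so callers should treat it as read-only:

from functools import lru_cache

@lru_cache(maxsize=1)
def expensive_info() -> dict:
    print("computing...")  # printed only on the first call
    return {"runtime": "python"}

first = expensive_info()
second = expensive_info()
assert first is second  # later calls return the identical cached object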
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the MSE loss between the computed query-passage score and a target query-passage score. This loss
is used to distill a cross-encoder model from a teacher cross-encoder model or gold labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.MSELoss`.
.. note::
Be mindful of the magnitude of both the labels and what the model produces. If the teacher model bounds its
logits to [0, 1] with a Sigmoid, then you may wish to use a Sigmoid activation function in this loss as well.
References:
- Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation: https://arxiv.org/abs/2010.02666
Requirements:
1. Your model must be initialized with `num_labels = 1` (a.k.a. the default) to predict one class.
2. Usually uses a finetuned CrossEncoder teacher M in a knowledge distillation setup.
Inputs:
+-----------------------------------------+-----------------------------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=========================================+=============================+===============================+
| (sentence_A, sentence_B) pairs | similarity score | 1 |
+-----------------------------------------+-----------------------------+-------------------------------+
Relations:
- :class:`MarginMSELoss` is similar to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
student_model = CrossEncoder("microsoft/mpnet-base")
teacher_model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L12-v2")
train_dataset = Dataset.from_dict({
"query": ["What are pandas?", "What is the capital of France?"],
"answer": ["Pandas are a kind of bear.", "The capital of France is Paris."],
})
def compute_labels(batch):
return {
"label": teacher_model.predict(list(zip(batch["query"], batch["answer"])))
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = CrossEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.loss_fct = nn.MSELoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the MSE loss between the computed query-passage score and a target query-passage score. This loss
is used to distill a cross-encoder model from a teacher cross-encoder model or gold labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.MSELoss`.
.. note::
Be mindful of the magnitude of both the labels and what the model produces. If the teacher model bounds its
logits to [0, 1] with a Sigmoid, then you may wish to use a Sigmoid activation function in this loss as well.
References:
- Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation: https://arxiv.org/abs/2010.02666
Requirements:
1. Your model must be initialized with `num_labels = 1` (a.k.a. the default) to predict one class.
2. Usually uses a finetuned CrossEncoder teacher M in a knowledge distillation setup.
Inputs:
+-----------------------------------------+-----------------------------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=========================================+=============================+===============================+
| (sentence_A, sentence_B) pairs | similarity score | 1 |
+-----------------------------------------+-----------------------------+-------------------------------+
Relations:
- :class:`MarginMSELoss` is similar to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
student_model = CrossEncoder("microsoft/mpnet-base")
teacher_model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-12-v2")
train_dataset = Dataset.from_dict({
"query": ["What are pandas?", "What is the capital of France?"],
"answer": ["Pandas are a kind of bear.", "The capital of France is Paris."],
})
def compute_labels(batch):
return {
"label": teacher_model.predict(list(zip(batch["query"], batch["answer"])))
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = CrossEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.loss_fct = nn.MSELoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseBinaryClassificationEvaluator(BinaryClassificationEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
labels: list[int],
name: str = "",
batch_size: int = 32,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
):
return super().__init__(
sentences1=sentences1,
sentences2=sentences2,
labels=labels,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
)
def __call__(
self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder) -> dict[str, dict[str, float]]:
return super().compute_metrices(model=model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseBinaryClassificationEvaluator(BinaryClassificationEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
labels: list[int],
name: str = "",
batch_size: int = 32,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
):
super().__init__(
sentences1=sentences1,
sentences2=sentences2,
labels=labels,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
)
def __call__(
self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def compute_metrices(self, model: SparseEncoder) -> dict[str, dict[str, float]]:
return super().compute_metrices(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import PvtImageProcessor
if is_torchvision_available():
from transformers import PvtImageProcessorFast
class PvtImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.485, 0.456, 0.406],
image_std=[0.229, 0.224, 0.225],
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class PvtImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = PvtImageProcessor if is_vision_available() else None
fast_image_processing_class = PvtImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = PvtImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
|
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import PvtImageProcessor
class PvtImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.485, 0.456, 0.406],
image_std=[0.229, 0.224, 0.225],
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class PvtImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = PvtImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = PvtImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
|
from unittest.mock import MagicMock, AsyncMock
import pytest
import sys
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
READER_TEST_PARAM = pytest.param(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2",
],
{
"parse": True,
},
{
"results": [{"content": {"key1": "value1", "key2": "value2"}}],
"job": {"job_id": 42424242},
},
"# key1\n value1\n\n# key2\n value2\n",
id="response_success",
)
skip_if_py39_or_lower = sys.version_info < (3, 10)
@pytest.mark.skipif(skip_if_py39_or_lower, reason="Requires Python 3.10 or newer")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
def test_sync_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = MagicMock()
get_response_mock.return_value = return_value
reader.api.get_response = get_response_mock
docs = reader.load_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
@pytest.mark.skipif(skip_if_py39_or_lower, reason="Requires Python 3.10 or newer")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
@pytest.mark.asyncio
async def test_async_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = AsyncMock()
get_response_mock.return_value = return_value
reader.async_api.get_response = get_response_mock
docs = await reader.aload_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
|
from unittest.mock import MagicMock, AsyncMock
import pytest
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
READER_TEST_PARAM = pytest.param(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2",
],
{
"parse": True,
},
{
"results": [{"content": {"key1": "value1", "key2": "value2"}}],
"job": {"job_id": 42424242},
},
"# key1\n value1\n\n# key2\n value2\n",
id="response_success",
)
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
def test_sync_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = MagicMock()
get_response_mock.return_value = return_value
reader.api.get_response = get_response_mock
docs = reader.load_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
@pytest.mark.asyncio
async def test_async_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = AsyncMock()
get_response_mock.return_value = return_value
reader.async_api.get_response = get_response_mock
docs = await reader.aload_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
|
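Besides dropping the Python 3.10 version gate, the pair above hinges on choosing the right mock type: `MagicMock` stubs the synchronous `api.get_response`, while `AsyncMock` makes the stub awaitable for `async_api.get_response`. A self-contained sketch of the distinction:

import asyncio
from unittest.mock import AsyncMock, MagicMock

sync_api = MagicMock(return_value={"ok": True})
assert sync_api() == {"ok": True}                # plain call returns the value directly

async_api = AsyncMock(return_value={"ok": True})
assert asyncio.run(async_api()) == {"ok": True}  # calling it yields an awaitable
async_api.assert_awaited_once()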
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
glu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
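A short usage sketch for the identifier-resolution logic above, assuming Keras 3 is installed: `get` maps None to `linear`, resolves strings (including the `swish`/`hard_swish` aliases) through `ALL_OBJECTS_DICT`, and passes callables through, while builtins serialize to their plain name.

from keras import activations

print(activations.get(None))               # None resolves to the linear passthrough
print(activations.get("relu"))             # string lookup in ALL_OBJECTS_DICT
print(activations.get("swish"))            # alias registered above, resolves to silu
name = activations.serialize(activations.get("relu"))
print(name)                                # builtins serialize to their name: 'relu'
print(activations.deserialize(name))       # and round-trip back to the function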
_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
max_epochs = 36
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
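The two configs above are the mmdet 3.x and 2.x spellings of the same recipe: 36 epochs with the learning rate decayed by 10x at epochs 28 and 34, plus a 500-iteration linear warmup that the 3.x version spells out explicitly. A small sketch of the epoch-wise LR factor the MultiStepLR part describes:

def multistep_factor(epoch: int, milestones=(28, 34), gamma: float = 0.1) -> float:
    # multiply the base LR by gamma once per milestone already passed
    return gamma ** sum(epoch >= m for m in milestones)

for epoch in (0, 27, 28, 33, 34, 35):
    print(epoch, multistep_factor(epoch))
# epochs 0-27 -> 1.0, 28-33 -> 0.1, 34-35 -> 0.01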
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but multilingual sparse models will hopefully appear on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity: Active Dimensions: 113.6, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but multilingual sparse models will hopefully appear on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity Stats: Row Non-Zero Mean: 113.6150016784668, Row Sparsity Mean: 0.9962776005268097
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
from jina.serve.runtimes.gateway.http.fastapi import (
FastAPIBaseGateway,
) # keep import here for backwards compatibility
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.http import HTTPServer
__all__ = ['HTTPGateway']
class HTTPGateway(HTTPServer, BaseGateway):
"""
:class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app
"""
pass
|
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway # keep import here for backwards compatibility
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.http import HTTPServer
__all__ = ['HTTPGateway']
class HTTPGateway(HTTPServer, BaseGateway):
"""
:class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app
"""
pass
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = numerical_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
def test_to_categorical_with_backend_tensor(self):
label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
expected = backend.convert_to_tensor(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
)
)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
assert backend.is_tensor(one_hot)
self.assertAllClose(one_hot, expected)
@parameterized.parameters([1, 2, 3])
def test_normalize(self, order):
xb = backend.random.uniform((3, 3), seed=1337)
xnp = backend.convert_to_numpy(xb)
# Expected result
l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
l2[l2 == 0] = 1
expected = xnp / np.expand_dims(l2, axis=-1)
# Test NumPy
out = numerical_utils.normalize(xnp, axis=-1, order=order)
self.assertIsInstance(out, np.ndarray)
self.assertAllClose(out, expected)
# Test backend
out = numerical_utils.normalize(xb, axis=-1, order=order)
self.assertTrue(backend.is_tensor(out))
self.assertAllClose(backend.convert_to_numpy(out), expected)
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
    def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = numerical_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
def test_to_categorical_with_backend_tensor(self):
label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
expected = backend.convert_to_tensor(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
)
)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
assert backend.is_tensor(one_hot)
self.assertAllClose(one_hot, expected)
@parameterized.parameters([1, 2, 3])
def test_normalize(self, order):
xb = backend.random.uniform((3, 3), seed=1337)
xnp = backend.convert_to_numpy(xb)
# Expected result
l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
l2[l2 == 0] = 1
expected = xnp / np.expand_dims(l2, axis=-1)
# Test NumPy
out = numerical_utils.normalize(xnp, axis=-1, order=order)
self.assertIsInstance(out, np.ndarray)
self.assertAllClose(out, expected)
# Test backend
out = numerical_utils.normalize(xb, axis=-1, order=order)
self.assertTrue(backend.is_tensor(out))
self.assertAllClose(backend.convert_to_numpy(out), expected)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def trigger_visualization_hook(cfg, args):
default_hooks = cfg.default_hooks
if 'visualization' in default_hooks:
visualization_hook = default_hooks['visualization']
# Turn on visualization
visualization_hook['draw'] = True
if args.show:
visualization_hook['show'] = True
visualization_hook['wait_time'] = args.wait_time
if args.show_dir:
visualization_hook['test_out_dir'] = args.show_dir
else:
raise RuntimeError(
            'VisualizationHook must be included in default_hooks. '
            'Refer to usage '
'"visualization=dict(type=\'VisualizationHook\')"')
return cfg
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# build the runner from config
runner = Runner.from_cfg(cfg)
# start testing
runner.test()
if __name__ == '__main__':
main()
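# Usage sketch (the script path, config, and checkpoint names below are placeholders):
#   python tools/test.py configs/some_config.py epoch_12.pth --show-dir vis_results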
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn, visualization, and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
# build the runner from config
runner = Runner.from_cfg(cfg)
# start testing
runner.test()
if __name__ == '__main__':
main()
|
from langchain_core.prompts.prompt import PromptTemplate
# For backwards compatibility.
Prompt = PromptTemplate
__all__ = ["Prompt", "PromptTemplate"]
|
from langchain_core.prompts.prompt import PromptTemplate
# For backwards compatibility.
Prompt = PromptTemplate
__all__ = ["PromptTemplate", "Prompt"]
|
#!/usr/bin/env python3
"""Generate feature statistics for training set.
Example:
python global_stats.py --model-type librispeech --dataset-path /home/librispeech
"""
import json
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import (
MODEL_TYPE_LIBRISPEECH,
MODEL_TYPE_MUSTC,
MODEL_TYPE_TEDLIUM3,
piecewise_linear_log,
spectrogram_transform,
)
from mustc.dataset import MUSTC
logger = logging.getLogger()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--dataset-path",
required=True,
type=pathlib.Path,
help="Path to dataset. "
"For LibriSpeech, all of 'train-clean-360', 'train-clean-100', and 'train-other-500' must exist.",
)
parser.add_argument(
"--output-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="File to save feature statistics to. (Default: './global_stats.json')",
)
return parser.parse_args()
def generate_statistics(samples):
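    # Streaming estimates of E[x] and E[x^2] over all spectrogram frames;
    # the function returns (mean, stddev) with stddev = sqrt(E[x^2] - E[x]^2).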
E_x = 0
E_x_2 = 0
N = 0
for idx, sample in enumerate(samples):
mel_spec = spectrogram_transform(sample[0].squeeze()).transpose(1, 0)
scaled_mel_spec = piecewise_linear_log(mel_spec)
        spec_sum = scaled_mel_spec.sum(0)
        sq_sum = scaled_mel_spec.pow(2).sum(0)
        M = scaled_mel_spec.size(0)
        E_x = E_x * (N / (N + M)) + spec_sum / (N + M)
E_x_2 = E_x_2 * (N / (N + M)) + sq_sum / (N + M)
N += M
if idx % 100 == 0:
logger.info(f"Processed {idx}")
return E_x, (E_x_2 - E_x**2) ** 0.5
def get_dataset(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return torch.utils.data.ConcatDataset(
[
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-360"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-100"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-other-500"),
]
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return torchaudio.datasets.TEDLIUM(args.dataset_path, release="release3", subset="train")
elif args.model_type == MODEL_TYPE_MUSTC:
return MUSTC(args.dataset_path, subset="train")
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def cli_main():
args = parse_args()
dataset = get_dataset(args)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=4)
mean, stddev = generate_statistics(iter(dataloader))
json_str = json.dumps({"mean": mean.tolist(), "invstddev": (1 / stddev).tolist()}, indent=2)
with open(args.output_path, "w") as f:
f.write(json_str)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
"""Generate feature statistics for training set.
Example:
python global_stats.py --model-type librispeech --dataset-path /home/librispeech
"""
import json
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import (
MODEL_TYPE_LIBRISPEECH,
MODEL_TYPE_MUSTC,
MODEL_TYPE_TEDLIUM3,
piecewise_linear_log,
spectrogram_transform,
)
from mustc.dataset import MUSTC
logger = logging.getLogger()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--dataset-path",
required=True,
type=pathlib.Path,
help="Path to dataset. "
"For LibriSpeech, all of 'train-clean-360', 'train-clean-100', and 'train-other-500' must exist.",
)
parser.add_argument(
"--output-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="File to save feature statistics to. (Default: './global_stats.json')",
)
return parser.parse_args()
def generate_statistics(samples):
E_x = 0
E_x_2 = 0
N = 0
for idx, sample in enumerate(samples):
mel_spec = spectrogram_transform(sample[0].squeeze()).transpose(1, 0)
scaled_mel_spec = piecewise_linear_log(mel_spec)
        spec_sum = scaled_mel_spec.sum(0)
        sq_sum = scaled_mel_spec.pow(2).sum(0)
        M = scaled_mel_spec.size(0)
        E_x = E_x * (N / (N + M)) + spec_sum / (N + M)
E_x_2 = E_x_2 * (N / (N + M)) + sq_sum / (N + M)
N += M
if idx % 100 == 0:
logger.info(f"Processed {idx}")
return E_x, (E_x_2 - E_x**2) ** 0.5
def get_dataset(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return torch.utils.data.ConcatDataset(
[
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-360"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-100"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-other-500"),
]
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return torchaudio.datasets.TEDLIUM(args.dataset_path, release="release3", subset="train")
elif args.model_type == MODEL_TYPE_MUSTC:
return MUSTC(args.dataset_path, subset="train")
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def cli_main():
args = parse_args()
dataset = get_dataset(args)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=4)
mean, stddev = generate_statistics(iter(dataloader))
json_str = json.dumps({"mean": mean.tolist(), "invstddev": (1 / stddev).tolist()}, indent=2)
with open(args.output_path, "w") as f:
f.write(json_str)
if __name__ == "__main__":
cli_main()
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(scope='function')
def test_envs(tmpdir):
os.environ['JINA_HUB_ROOT'] = str(tmpdir)
os.environ['JINA_HUB_CACHE_DIR'] = str(tmpdir)
yield None
del os.environ['JINA_HUB_ROOT']
del os.environ['JINA_HUB_CACHE_DIR']
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(scope='function')
def test_envs(tmpdir):
os.environ['JINA_HUB_ROOT'] = str(tmpdir)
os.environ['JINA_HUB_CACHE_DIR'] = str(tmpdir)
yield None
del os.environ['JINA_HUB_ROOT']
del os.environ['JINA_HUB_CACHE_DIR']
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from . import compression
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or `s3fs.S3FileSystem`.
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
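# Usage sketch (paths below are illustrative): `rename` picks the efficient
# local move for a LocalFileSystem and falls back to `fs.mv` otherwise.
#   fs = fsspec.filesystem("file")
#   rename(fs, "/tmp/old_dataset", "/tmp/new_dataset")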
|
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
@deprecated(
"This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
|
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The model must be a `keras.Model`. "
f"Received: model={model} of the type {type(model)}"
)
if not model.built:
raise ValueError(
"The model provided has not yet been built. It must be built "
"before export."
)
if isinstance(model, models.Functional):
input_signature = [
tree.map_structure(make_input_spec, model._inputs_struct)
]
elif isinstance(model, models.Sequential):
input_signature = tree.map_structure(make_input_spec, model.inputs)
else:
input_signature = _infer_input_signature_from_model(model)
if not input_signature or not model._called:
raise ValueError(
"The model provided has never called. "
"It must be called at least once before export."
)
return input_signature
def _infer_input_signature_from_model(model):
shapes_dict = getattr(model, "_build_shapes_dict", None)
if not shapes_dict:
return None
def _make_input_spec(structure):
# We need to turn wrapper structures like TrackingDict or _DictWrapper
# into plain Python structures because they don't work with jax2tf/JAX.
if isinstance(structure, dict):
return {k: _make_input_spec(v) for k, v in structure.items()}
elif isinstance(structure, tuple):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=(None,) + structure[1:], dtype=model.input_dtype
)
return tuple(_make_input_spec(v) for v in structure)
elif isinstance(structure, list):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=[None] + structure[1:], dtype=model.input_dtype
)
return [_make_input_spec(v) for v in structure]
else:
raise ValueError(
f"Unsupported type {type(structure)} for {structure}"
)
return [_make_input_spec(value) for value in shapes_dict.values()]
def make_input_spec(x):
if isinstance(x, layers.InputSpec):
if x.shape is None or x.dtype is None:
raise ValueError(
f"The `shape` and `dtype` must be provided. Received: x={x}"
)
input_spec = x
elif isinstance(x, backend.KerasTensor):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=x.name)
elif backend.is_tensor(x):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=None)
else:
raise TypeError(
f"Unsupported x={x} of the type ({type(x)}). Supported types are: "
"`keras.InputSpec`, `keras.KerasTensor` and backend tensor."
)
return input_spec
def make_tf_tensor_spec(x):
if isinstance(x, tf.TensorSpec):
tensor_spec = x
else:
input_spec = make_input_spec(x)
tensor_spec = tf.TensorSpec(
input_spec.shape, dtype=input_spec.dtype, name=input_spec.name
)
return tensor_spec
def convert_spec_to_tensor(spec, replace_none_number=None):
shape = backend.standardize_shape(spec.shape)
if replace_none_number is not None:
replace_none_number = int(replace_none_number)
shape = tuple(
s if s is not None else replace_none_number for s in shape
)
return ops.ones(shape, spec.dtype)
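# Usage sketch (assumes `layers.Input` is exported by keras.src.layers; the
# shape is illustrative): derive a tf.TensorSpec with a free batch dimension.
#   x = layers.Input(shape=(32,), dtype="float32")
#   spec = make_tf_tensor_spec(x)  # tf.TensorSpec(shape=(None, 32), dtype=tf.float32)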
|
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The model must be a `keras.Model`. "
f"Received: model={model} of the type {type(model)}"
)
if not model.built:
raise ValueError(
"The model provided has not yet been built. It must be built "
"before export."
)
if isinstance(model, (models.Functional, models.Sequential)):
input_signature = tree.map_structure(make_input_spec, model.inputs)
if isinstance(input_signature, list) and len(input_signature) > 1:
input_signature = [input_signature]
else:
input_signature = _infer_input_signature_from_model(model)
if not input_signature or not model._called:
raise ValueError(
"The model provided has never called. "
"It must be called at least once before export."
)
return input_signature
def _infer_input_signature_from_model(model):
shapes_dict = getattr(model, "_build_shapes_dict", None)
if not shapes_dict:
return None
def _make_input_spec(structure):
# We need to turn wrapper structures like TrackingDict or _DictWrapper
# into plain Python structures because they don't work with jax2tf/JAX.
if isinstance(structure, dict):
return {k: _make_input_spec(v) for k, v in structure.items()}
elif isinstance(structure, tuple):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=(None,) + structure[1:], dtype=model.input_dtype
)
return tuple(_make_input_spec(v) for v in structure)
elif isinstance(structure, list):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=[None] + structure[1:], dtype=model.input_dtype
)
return [_make_input_spec(v) for v in structure]
else:
raise ValueError(
f"Unsupported type {type(structure)} for {structure}"
)
return [_make_input_spec(value) for value in shapes_dict.values()]
def make_input_spec(x):
if isinstance(x, layers.InputSpec):
if x.shape is None or x.dtype is None:
raise ValueError(
f"The `shape` and `dtype` must be provided. Received: x={x}"
)
input_spec = x
elif isinstance(x, backend.KerasTensor):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=x.name)
elif backend.is_tensor(x):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=None)
else:
raise TypeError(
f"Unsupported x={x} of the type ({type(x)}). Supported types are: "
"`keras.InputSpec`, `keras.KerasTensor` and backend tensor."
)
return input_spec
def make_tf_tensor_spec(x):
if isinstance(x, tf.TensorSpec):
tensor_spec = x
else:
input_spec = make_input_spec(x)
tensor_spec = tf.TensorSpec(
input_spec.shape, dtype=input_spec.dtype, name=input_spec.name
)
return tensor_spec
def convert_spec_to_tensor(spec, replace_none_number=None):
shape = backend.standardize_shape(spec.shape)
if replace_none_number is not None:
replace_none_number = int(replace_none_number)
shape = tuple(
s if s is not None else replace_none_number for s in shape
)
return ops.ones(shape, spec.dtype)
|
import uuid
from typing import Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.common import assert_documents_equals
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
def test_qdrant_add_documents_extends_existing_collection(
batch_size: int, vector_name: Optional[str]
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch: Qdrant = Qdrant.from_texts(
texts,
ConsistentFakeEmbeddings(),
location=":memory:",
batch_size=batch_size,
vector_name=vector_name,
)
new_texts = ["foobar", "foobaz"]
docsearch.add_documents(
[Document(page_content=content) for content in new_texts], batch_size=batch_size
)
output = docsearch.similarity_search("foobar", k=1)
    # ConsistentFakeEmbeddings returns the same query embedding as the first
    # document embedding computed in `embedding.embed_documents`. Thus, the "foo"
    # embedding is the same as the "foobar" embedding.
assert_documents_equals(output, [Document(page_content="foobar")])
@pytest.mark.parametrize("batch_size", [1, 64])
def test_qdrant_add_texts_returns_all_ids(batch_size: int) -> None:
"""Test end to end Qdrant.add_texts returns unique ids."""
docsearch: Qdrant = Qdrant.from_texts(
["foobar"],
ConsistentFakeEmbeddings(),
location=":memory:",
batch_size=batch_size,
)
ids = docsearch.add_texts(["foo", "bar", "baz"])
assert 3 == len(ids)
assert 3 == len(set(ids))
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
def test_qdrant_add_texts_stores_duplicated_texts(vector_name: Optional[str]) -> None:
"""Test end to end Qdrant.add_texts stores duplicated texts separately."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
client = QdrantClient(":memory:")
collection_name = uuid.uuid4().hex
vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE)
if vector_name is not None:
vectors_config = {vector_name: vectors_config}
client.recreate_collection(collection_name, vectors_config=vectors_config)
vec_store = Qdrant(
client,
collection_name,
embeddings=ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
ids = vec_store.add_texts(["abc", "abc"], [{"a": 1}, {"a": 2}])
assert 2 == len(set(ids))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
def test_qdrant_add_texts_stores_ids(batch_size: int) -> None:
"""Test end to end Qdrant.add_texts stores provided ids."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
client = QdrantClient(":memory:")
collection_name = uuid.uuid4().hex
client.recreate_collection(
collection_name,
vectors_config=rest.VectorParams(size=10, distance=rest.Distance.COSINE),
)
vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings())
returned_ids = vec_store.add_texts(["abc", "def"], ids=ids, batch_size=batch_size)
assert all(first == second for first, second in zip(ids, returned_ids))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.parametrize("vector_name", ["custom-vector"])
def test_qdrant_add_texts_stores_embeddings_as_named_vectors(vector_name: str) -> None:
"""Test end to end Qdrant.add_texts stores named vectors if name is provided."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
collection_name = uuid.uuid4().hex
client = QdrantClient(":memory:")
client.recreate_collection(
collection_name,
vectors_config={
vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE)
},
)
vec_store = Qdrant(
client,
collection_name,
ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
vec_store.add_texts(["lorem", "ipsum", "dolor", "sit", "amet"])
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector
for point in client.scroll(collection_name, with_vectors=True)[0]
)
|
import uuid
from typing import Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.common import assert_documents_equals
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
def test_qdrant_add_documents_extends_existing_collection(
batch_size: int, vector_name: Optional[str]
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch: Qdrant = Qdrant.from_texts(
texts,
ConsistentFakeEmbeddings(),
location=":memory:",
batch_size=batch_size,
vector_name=vector_name,
)
new_texts = ["foobar", "foobaz"]
docsearch.add_documents(
[Document(page_content=content) for content in new_texts], batch_size=batch_size
)
output = docsearch.similarity_search("foobar", k=1)
    # ConsistentFakeEmbeddings returns the same query embedding as the first
    # document embedding computed in `embedding.embed_documents`. Thus, the "foo"
    # embedding is the same as the "foobar" embedding.
assert_documents_equals(output, [Document(page_content="foobar")])
@pytest.mark.parametrize("batch_size", [1, 64])
def test_qdrant_add_texts_returns_all_ids(batch_size: int) -> None:
"""Test end to end Qdrant.add_texts returns unique ids."""
docsearch: Qdrant = Qdrant.from_texts(
["foobar"],
ConsistentFakeEmbeddings(),
location=":memory:",
batch_size=batch_size,
)
ids = docsearch.add_texts(["foo", "bar", "baz"])
assert 3 == len(ids)
assert 3 == len(set(ids))
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
def test_qdrant_add_texts_stores_duplicated_texts(vector_name: Optional[str]) -> None:
"""Test end to end Qdrant.add_texts stores duplicated texts separately."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
client = QdrantClient(":memory:")
collection_name = uuid.uuid4().hex
vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE)
if vector_name is not None:
vectors_config = {vector_name: vectors_config} # type: ignore[assignment]
client.recreate_collection(collection_name, vectors_config=vectors_config)
vec_store = Qdrant(
client,
collection_name,
embeddings=ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
ids = vec_store.add_texts(["abc", "abc"], [{"a": 1}, {"a": 2}])
assert 2 == len(set(ids))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
def test_qdrant_add_texts_stores_ids(batch_size: int) -> None:
"""Test end to end Qdrant.add_texts stores provided ids."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
client = QdrantClient(":memory:")
collection_name = uuid.uuid4().hex
client.recreate_collection(
collection_name,
vectors_config=rest.VectorParams(size=10, distance=rest.Distance.COSINE),
)
vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings())
returned_ids = vec_store.add_texts(["abc", "def"], ids=ids, batch_size=batch_size)
assert all(first == second for first, second in zip(ids, returned_ids))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.parametrize("vector_name", ["custom-vector"])
def test_qdrant_add_texts_stores_embeddings_as_named_vectors(vector_name: str) -> None:
"""Test end to end Qdrant.add_texts stores named vectors if name is provided."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
collection_name = uuid.uuid4().hex
client = QdrantClient(":memory:")
client.recreate_collection(
collection_name,
vectors_config={
vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE)
},
)
vec_store = Qdrant(
client,
collection_name,
ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
vec_store.add_texts(["lorem", "ipsum", "dolor", "sit", "amet"])
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector # type: ignore[operator]
for point in client.scroll(collection_name, with_vectors=True)[0]
)
|
from __future__ import annotations
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model)
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model)
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
from __future__ import annotations
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model)
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
"test_dot_accuracy@1",
"test_dot_accuracy@3",
"test_dot_precision@1",
"test_dot_precision@3",
"test_dot_recall@1",
"test_dot_recall@3",
"test_dot_ndcg@3",
"test_dot_mrr@3",
"test_dot_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model)
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
"test_dot_accuracy@1": 1.0,
"test_dot_accuracy@3": 1.0,
"test_dot_precision@1": 1.0,
"test_dot_precision@3": 0.4,
"test_dot_recall@1": 0.9,
"test_dot_recall@3": 1.0,
"test_dot_ndcg@3": 1.0,
"test_dot_mrr@3": 1.0,
"test_dot_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
import functools
import logging
import os
import time
from typing import Any, Awaitable, Callable, Coroutine, ParamSpec, Tuple, TypeVar
from pydantic import BaseModel
class TimingInfo(BaseModel):
cpu_time: float
wall_time: float
def _start_measurement() -> Tuple[float, float]:
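    # Returns (wall-clock timestamp, CPU time), where CPU time is the sum of
    # user and system time for the current process via os.times().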
return time.time(), os.times()[0] + os.times()[1]
def _end_measurement(
start_wall_time: float, start_cpu_time: float
) -> Tuple[float, float]:
end_wall_time = time.time()
end_cpu_time = os.times()[0] + os.times()[1]
return end_wall_time - start_wall_time, end_cpu_time - start_cpu_time
P = ParamSpec("P")
T = TypeVar("T")
logger = logging.getLogger(__name__)
def time_measured(func: Callable[P, T]) -> Callable[P, Tuple[TimingInfo, T]]:
"""
Decorator to measure the time taken by a synchronous function to execute.
"""
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> Tuple[TimingInfo, T]:
start_wall_time, start_cpu_time = _start_measurement()
try:
result = func(*args, **kwargs)
finally:
wall_duration, cpu_duration = _end_measurement(
start_wall_time, start_cpu_time
)
timing_info = TimingInfo(cpu_time=cpu_duration, wall_time=wall_duration)
return timing_info, result
return wrapper
def async_time_measured(
func: Callable[P, Awaitable[T]],
) -> Callable[P, Awaitable[Tuple[TimingInfo, T]]]:
"""
Decorator to measure the time taken by an async function to execute.
"""
@functools.wraps(func)
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> Tuple[TimingInfo, T]:
start_wall_time, start_cpu_time = _start_measurement()
try:
result = await func(*args, **kwargs)
finally:
wall_duration, cpu_duration = _end_measurement(
start_wall_time, start_cpu_time
)
timing_info = TimingInfo(cpu_time=cpu_duration, wall_time=wall_duration)
return timing_info, result
return async_wrapper
def error_logged(func: Callable[P, T]) -> Callable[P, T | None]:
"""
Decorator to suppress and log any exceptions raised by a function.
"""
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | None:
try:
return func(*args, **kwargs)
except Exception as e:
logger.exception(
f"Error when calling function {func.__name__} with arguments {args} {kwargs}: {e}"
)
return wrapper
def async_error_logged(
func: Callable[P, Coroutine[Any, Any, T]],
) -> Callable[P, Coroutine[Any, Any, T | None]]:
"""
Decorator to suppress and log any exceptions raised by an async function.
"""
@functools.wraps(func)
async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | None:
try:
return await func(*args, **kwargs)
except Exception as e:
logger.exception(
f"Error when calling async function {func.__name__} with arguments {args} {kwargs}: {e}"
)
return wrapper
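# Usage sketch: decorated callables return a (TimingInfo, result) tuple.
#   @time_measured
#   def work(n: int) -> int:
#       return sum(range(n))
#   info, total = work(1_000_000)
#   print(info.wall_time, info.cpu_time)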
|
import functools
import logging
import os
import time
from typing import Callable, ParamSpec, Tuple, TypeVar
from pydantic import BaseModel
class TimingInfo(BaseModel):
cpu_time: float
wall_time: float
def _start_measurement() -> Tuple[float, float]:
return time.time(), os.times()[0] + os.times()[1]
def _end_measurement(
start_wall_time: float, start_cpu_time: float
) -> Tuple[float, float]:
end_wall_time = time.time()
end_cpu_time = os.times()[0] + os.times()[1]
return end_wall_time - start_wall_time, end_cpu_time - start_cpu_time
P = ParamSpec("P")
T = TypeVar("T")
logger = logging.getLogger(__name__)
def time_measured(func: Callable[P, T]) -> Callable[P, Tuple[TimingInfo, T]]:
"""
Decorator to measure the time taken by a function to execute.
"""
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> Tuple[TimingInfo, T]:
start_wall_time, start_cpu_time = _start_measurement()
try:
result = func(*args, **kwargs)
finally:
wall_duration, cpu_duration = _end_measurement(
start_wall_time, start_cpu_time
)
timing_info = TimingInfo(cpu_time=cpu_duration, wall_time=wall_duration)
return timing_info, result
return wrapper
def error_logged(func: Callable[P, T]) -> Callable[P, T | None]:
"""
Decorator to suppress and log any exceptions raised by a function.
"""
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | None:
try:
return func(*args, **kwargs)
except Exception as e:
logger.exception(
f"Error when calling function {func.__name__} with arguments {args} {kwargs}: {e}"
)
return wrapper
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_model.sh'], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with a `blob` of size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that defines the `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # Already at the target rate; return the blob unchanged so the
            # caller's tuple unpacking still works.
            return blob, AudioCLIPEncoder.TARGET_SAMPLE_RATE
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
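# Usage sketch (assumes the jina Flow API; `wave` stands in for a mono
# np.ndarray waveform and 16000 is an illustrative sample rate):
#   from jina import Document, Flow
#   doc = Document(blob=wave, tags={'sample_rate': 16000})
#   with Flow().add(uses=AudioCLIPEncoder) as f:
#       f.post(on='/', inputs=doc)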
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with a `blob` of size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that defines the `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # Already at the target rate; return the blob unchanged so the
            # caller's tuple unpacking still works.
            return blob, AudioCLIPEncoder.TARGET_SAMPLE_RATE
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
|
import uuid
from typing import Optional
import pytest
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
qdrant_locations,
)
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_returns_all_ids(
batch_size: int, qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts returns unique ids."""
docsearch: Qdrant = Qdrant.from_texts(
["foobar"],
ConsistentFakeEmbeddings(),
batch_size=batch_size,
location=qdrant_location,
)
ids = await docsearch.aadd_texts(["foo", "bar", "baz"])
assert 3 == len(ids)
assert 3 == len(set(ids))
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_stores_duplicated_texts(
vector_name: Optional[str], qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts stores duplicated texts separately."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
client = QdrantClient(location=qdrant_location)
collection_name = uuid.uuid4().hex
vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE)
if vector_name is not None:
vectors_config = {vector_name: vectors_config}
client.recreate_collection(collection_name, vectors_config=vectors_config)
vec_store = Qdrant(
client,
collection_name,
embeddings=ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
ids = await vec_store.aadd_texts(["abc", "abc"], [{"a": 1}, {"a": 2}])
assert 2 == len(set(ids))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_stores_ids(
batch_size: int, qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts stores provided ids."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
client = QdrantClient(location=qdrant_location)
collection_name = uuid.uuid4().hex
client.recreate_collection(
collection_name,
vectors_config=rest.VectorParams(size=10, distance=rest.Distance.COSINE),
)
vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings())
returned_ids = await vec_store.aadd_texts(
["abc", "def"], ids=ids, batch_size=batch_size
)
assert all(first == second for first, second in zip(ids, returned_ids))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.parametrize("vector_name", ["custom-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_stores_embeddings_as_named_vectors(
vector_name: str, qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts stores named vectors if name is provided."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
collection_name = uuid.uuid4().hex
client = QdrantClient(location=qdrant_location)
client.recreate_collection(
collection_name,
vectors_config={
vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE)
},
)
vec_store = Qdrant(
client,
collection_name,
ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
await vec_store.aadd_texts(["lorem", "ipsum", "dolor", "sit", "amet"])
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector
for point in client.scroll(collection_name, with_vectors=True)[0]
)
|
import uuid
from typing import Optional
import pytest
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
qdrant_locations,
)
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_returns_all_ids(
batch_size: int, qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts returns unique ids."""
docsearch: Qdrant = Qdrant.from_texts(
["foobar"],
ConsistentFakeEmbeddings(),
batch_size=batch_size,
location=qdrant_location,
)
ids = await docsearch.aadd_texts(["foo", "bar", "baz"])
assert 3 == len(ids)
assert 3 == len(set(ids))
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_stores_duplicated_texts(
vector_name: Optional[str], qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts stores duplicated texts separately."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
client = QdrantClient(location=qdrant_location)
collection_name = uuid.uuid4().hex
vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE)
if vector_name is not None:
vectors_config = {vector_name: vectors_config} # type: ignore[assignment]
client.recreate_collection(collection_name, vectors_config=vectors_config)
vec_store = Qdrant(
client,
collection_name,
embeddings=ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
ids = await vec_store.aadd_texts(["abc", "abc"], [{"a": 1}, {"a": 2}])
assert 2 == len(set(ids))
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_stores_ids(
batch_size: int, qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts stores provided ids."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
client = QdrantClient(location=qdrant_location)
collection_name = uuid.uuid4().hex
client.recreate_collection(
collection_name,
vectors_config=rest.VectorParams(size=10, distance=rest.Distance.COSINE),
)
vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings())
returned_ids = await vec_store.aadd_texts(
["abc", "def"], ids=ids, batch_size=batch_size
)
assert all(first == second for first, second in zip(ids, returned_ids))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.parametrize("vector_name", ["custom-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_aadd_texts_stores_embeddings_as_named_vectors(
vector_name: str, qdrant_location: str
) -> None:
"""Test end to end Qdrant.aadd_texts stores named vectors if name is provided."""
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
collection_name = uuid.uuid4().hex
client = QdrantClient(location=qdrant_location)
client.recreate_collection(
collection_name,
vectors_config={
vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE)
},
)
vec_store = Qdrant(
client,
collection_name,
ConsistentFakeEmbeddings(),
vector_name=vector_name,
)
await vec_store.aadd_texts(["lorem", "ipsum", "dolor", "sit", "amet"])
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector # type: ignore[operator]
for point in client.scroll(collection_name, with_vectors=True)[0]
)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xe3\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\"?\n\x16\x44ocumentArrayListProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xda\x01\n\x19\x44ocumentArrayStackedProto\x12/\n\x05list_\x18\x01 \x01(\x0b\x32 .docarray.DocumentArrayListProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\"\x88\x01\n\x12\x44ocumentArrayProto\x12\x31\n\x05list_\x18\x01 \x01(\x0b\x32 .docarray.DocumentArrayListProtoH\x00\x12\x34\n\x05stack\x18\x02 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x42\t\n\x07\x63ontentb\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 586
_DOCUMENTPROTO._serialized_start = 589
_DOCUMENTPROTO._serialized_end = 719
_DOCUMENTPROTO_DATAENTRY._serialized_start = 655
_DOCUMENTPROTO_DATAENTRY._serialized_end = 719
_DOCUMENTARRAYLISTPROTO._serialized_start = 721
_DOCUMENTARRAYLISTPROTO._serialized_end = 784
_UNIONARRAYPROTO._serialized_start = 787
_UNIONARRAYPROTO._serialized_end = 921
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 924
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1142
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1069
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1142
_DOCUMENTARRAYPROTO._serialized_start = 1145
_DOCUMENTARRAYPROTO._serialized_end = 1281
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xe3\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 586
_DOCUMENTPROTO._serialized_start = 589
_DOCUMENTPROTO._serialized_end = 719
_DOCUMENTPROTO_DATAENTRY._serialized_start = 655
_DOCUMENTPROTO_DATAENTRY._serialized_end = 719
_DOCUMENTARRAYPROTO._serialized_start = 721
_DOCUMENTARRAYPROTO._serialized_end = 780
# @@protoc_insertion_point(module_scope)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.8.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.7.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Pipeline")
class Pipeline(Layer):
"""Applies a series of layers to an input.
This class is useful to build a preprocessing pipeline.
Compared to a `Sequential` model, `Pipeline` features
a few important differences:
- It's not a `Model`, just a plain layer.
- When the layers in the pipeline are compatible
with `tf.data`, the pipeline will also
remain `tf.data` compatible. That is to say,
the pipeline will not attempt to convert
its inputs to backend-native tensors
when in a tf.data context (unlike a `Sequential`
model).
Example:
```python
from keras import layers
preprocessing_pipeline = layers.Pipeline([
layers.AutoContrast(),
layers.RandomZoom(0.2),
layers.RandomRotation(0.2),
])
# `ds` is a tf.data.Dataset
preprocessed_ds = ds.map(
preprocessing_pipeline,
num_parallel_calls=4,
)
```
"""
def __init__(self, layers, name=None):
super().__init__(name=name)
self._pipeline_layers = layers
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
@property
def layers(self):
return self._pipeline_layers
def call(self, inputs, training=True, mask=None):
for layer in self._pipeline_layers:
kwargs = {}
if layer._call_has_mask_arg:
kwargs["mask"] = mask
if layer._call_has_training_arg and training is not None:
kwargs["training"] = training
outputs = layer(inputs, **kwargs)
inputs = outputs
def _get_mask_from_keras_tensor(kt):
return getattr(kt, "_keras_mask", None)
mask = tree.map_structure(_get_mask_from_keras_tensor, outputs)
return outputs
def get_config(self):
config = {
"layers": serialization_lib.serialize_keras_object(
self._pipeline_layers
),
"name": self.name,
}
return config
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Pipeline")
class Pipeline(Layer):
"""Applies a series of layers to an input.
This class is useful to build a preprocessing pipeline.
Compared to a `Sequential` model, `Pipeline` features
a few important differences:
- It's not a `Model`, just a plain layer.
- When the layers in the pipeline are compatible
with `tf.data`, the pipeline will also
remain `tf.data` compatible. That is to say,
the pipeline will not attempt to convert
its inputs to backend-native tensors
when in a tf.data context (unlike a `Sequential`
model).
Example:
```python
from keras import layers
preprocessing_pipeline = layers.Pipeline([
layers.AutoContrast(),
layers.RandomZoom(0.2),
layers.RandomRotation(0.2),
])
# `ds` is a tf.data.Dataset
preprocessed_ds = ds.map(
preprocessing_pipeline,
num_parallel_calls=4,
)
```
"""
def __init__(self, layers, name=None):
super().__init__(name=name)
self._pipeline_layers = layers
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
@property
def layers(self):
return self._pipeline_layers
def build(self, input_shape):
for layer in self._pipeline_layers:
layer.build(input_shape)
input_shape = layer.compute_output_shape(input_shape)
self.built = True
def call(self, inputs, training=True, mask=None):
for layer in self._pipeline_layers:
kwargs = {}
if layer._call_has_mask_arg:
kwargs["mask"] = mask
if layer._call_has_training_arg and training is not None:
kwargs["training"] = training
outputs = layer(inputs, **kwargs)
inputs = outputs
def _get_mask_from_keras_tensor(kt):
return getattr(kt, "_keras_mask", None)
mask = tree.map_structure(_get_mask_from_keras_tensor, outputs)
return outputs
def get_config(self):
config = {
"layers": serialization_lib.serialize_keras_object(
self._pipeline_layers
),
"name": self.name,
}
return config
|
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self._build_at_init()
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
base_config.pop("activity_regularizer", None)
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
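# A minimal sketch of the layer's effect, assuming the public Keras 3 API
# (`keras.Sequential`, `keras.layers`): the layer is an identity on data but
# registers an activity loss on the model.
if __name__ == "__main__":
    import numpy as np
    from keras import Sequential, layers

    model = Sequential([layers.Dense(4), layers.ActivityRegularization(l1=0.01)])
    _ = model(np.ones((2, 3)))
    print(model.losses)  # non-empty: the L1 penalty on the Dense activations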
|
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self.built = True
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
base_config.pop("activity_regularizer", None)
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
def test_cli_hub():
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', 'jinahub://DummyHubExecutor'])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = set_pod_parser().parse_args([])
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = set_pod_parser().parse_args([])
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
|
import langchain_core.tracers.schemas as schemas
from langchain_core.tracers.schemas import __all__ as schemas_all
def test_public_api() -> None:
"""Test for changes in the public API."""
expected_all = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
assert sorted(schemas_all) == expected_all
# Assert that the object is actually present in the schema module
for module_name in expected_all:
assert hasattr(schemas, module_name)
assert getattr(schemas, module_name) is not None
|
import langchain_core.tracers.schemas as schemas
from langchain_core.tracers.schemas import __all__ as schemas_all
def test_public_api() -> None:
"""Test for changes in the public API."""
expected_all = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
assert sorted(schemas_all) == expected_all
# Assert that the object is actually present in the schema module
for module_name in expected_all:
assert (
hasattr(schemas, module_name) and getattr(schemas, module_name) is not None
)
|
from contextlib import asynccontextmanager
from datetime import timedelta
from typing import Optional, List, Dict
from urllib.parse import urlparse
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client, StdioServerParameters
class BasicMCPClient(ClientSession):
"""
Basic MCP client that can be used to connect to an MCP server.
This is useful for verifying that an MCP server implementing `FastMCP` is working.
Args:
command_or_url: The command to run or the URL to connect to.
args: The arguments to pass to StdioServerParameters.
env: The environment variables to set for StdioServerParameters.
timeout: The timeout for the command in seconds.
"""
def __init__(
self,
command_or_url: str,
args: Optional[List[str]] = None,
env: Optional[Dict[str, str]] = None,
timeout: int = 30,
):
self.command_or_url = command_or_url
self.args = args or []
self.env = env or {}
self.timeout = timeout
@asynccontextmanager
async def _run_session(self):
if urlparse(self.command_or_url).scheme in ("http", "https"):
async with sse_client(self.command_or_url) as streams:
async with ClientSession(
*streams, read_timeout_seconds=timedelta(seconds=self.timeout)
) as session:
await session.initialize()
yield session
else:
server_parameters = StdioServerParameters(
command=self.command_or_url, args=self.args, env=self.env
)
async with stdio_client(server_parameters) as streams:
async with ClientSession(
*streams, read_timeout_seconds=timedelta(seconds=self.timeout)
) as session:
await session.initialize()
yield session
async def call_tool(self, tool_name: str, arguments: dict):
async with self._run_session() as session:
return await session.call_tool(tool_name, arguments)
async def list_tools(self):
async with self._run_session() as session:
return await session.list_tools()
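# Hedged smoke-test sketch for the client above; "python server.py" is a
# placeholder command for a local stdio MCP server, not something shipped here.
if __name__ == "__main__":
    import asyncio

    client = BasicMCPClient("python", args=["server.py"], timeout=30)
    print(asyncio.run(client.list_tools()))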
|
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client, StdioServerParameters
from urllib.parse import urlparse
from contextlib import asynccontextmanager
from typing import Optional
class BasicMCPClient(ClientSession):
"""
Basic MCP client that can be used to connect to an MCP server.
This is useful for verifying that an MCP server implementing `FastMCP` is working.
"""
def __init__(
self,
command_or_url: str,
args: Optional[list[str]] = None,
env: Optional[dict[str, str]] = None,
):
self.command_or_url = command_or_url
self.args = args or []
self.env = env or {}
@asynccontextmanager
async def _run_session(self):
if urlparse(self.command_or_url).scheme in ("http", "https"):
async with sse_client(self.command_or_url) as streams:
async with ClientSession(*streams) as session:
await session.initialize()
yield session
else:
server_parameters = StdioServerParameters(
command=self.command_or_url, args=self.args, env=self.env
)
async with stdio_client(server_parameters) as streams:
async with ClientSession(*streams) as session:
await session.initialize()
yield session
async def call_tool(self, tool_name: str, arguments: dict):
async with self._run_session() as session:
return await session.call_tool(tool_name, arguments)
async def list_tools(self):
async with self._run_session() as session:
return await session.list_tools()
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = (
"aindex",
"DeleteResponse",
"DocumentIndex",
"index",
"IndexingResult",
"InMemoryRecordManager",
"RecordManager",
"UpsertResponse",
)
_dynamic_imports = {
"aindex": "api",
"index": "api",
"IndexingResult": "api",
"DeleteResponse": "base",
"DocumentIndex": "base",
"InMemoryRecordManager": "base",
"RecordManager": "base",
"UpsertResponse": "base",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
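# How the lazy imports above behave (sketch): the first attribute access goes
# through __getattr__, which resolves the target via import_attr and caches it
# in globals(), so later lookups bypass __getattr__ entirely. For example:
#
#     from langchain_core import indexing
#     indexing.RecordManager  # first access: import_attr runs, result cached
#     indexing.RecordManager  # second access: plain globals() hit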
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = [
"aindex",
"DeleteResponse",
"DocumentIndex",
"index",
"IndexingResult",
"InMemoryRecordManager",
"RecordManager",
"UpsertResponse",
]
_dynamic_imports = {
"aindex": "api",
"index": "api",
"IndexingResult": "api",
"DeleteResponse": "base",
"DocumentIndex": "base",
"InMemoryRecordManager": "base",
"RecordManager": "base",
"UpsertResponse": "base",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from inspect import signature
from typing import (
Any,
Awaitable,
Callable,
List,
Optional,
Tuple,
Type,
Union,
cast,
get_origin,
get_args,
)
import typing
from llama_index.core.bridge.pydantic import BaseModel, FieldInfo, create_model
def create_schema_from_function(
name: str,
func: Union[Callable[..., Any], Callable[..., Awaitable[Any]]],
additional_fields: Optional[
List[Union[Tuple[str, Type, Any], Tuple[str, Type]]]
] = None,
ignore_fields: Optional[List[str]] = None,
) -> Type[BaseModel]:
"""Create schema from function."""
fields = {}
ignore_fields = ignore_fields or []
params = signature(func).parameters
for param_name in params:
if param_name in ignore_fields:
continue
param_type = params[param_name].annotation
param_default = params[param_name].default
description = None
if get_origin(param_type) is typing.Annotated:
args = get_args(param_type)
param_type = args[0]
if isinstance(args[1], str):
description = args[1]
elif isinstance(args[1], FieldInfo):
description = args[1].description
if param_type is params[param_name].empty:
param_type = Any
if param_default is params[param_name].empty:
# Required field
fields[param_name] = (param_type, FieldInfo(description=description))
elif isinstance(param_default, FieldInfo):
# Field with pydantic.Field as default value
fields[param_name] = (param_type, param_default)
else:
fields[param_name] = (
param_type,
FieldInfo(default=param_default, description=description),
)
additional_fields = additional_fields or []
for field_info in additional_fields:
if len(field_info) == 3:
field_info = cast(Tuple[str, Type, Any], field_info)
field_name, field_type, field_default = field_info
fields[field_name] = (field_type, FieldInfo(default=field_default))
elif len(field_info) == 2:
# Required field has no default value
field_info = cast(Tuple[str, Type], field_info)
field_name, field_type = field_info
fields[field_name] = (field_type, FieldInfo())
else:
raise ValueError(
f"Invalid additional field info: {field_info}. "
"Must be a tuple of length 2 or 3."
)
return create_model(name, **fields) # type: ignore
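# Minimal sketch of the helper above; `add` and `AddSchema` are made-up names,
# and dumping the JSON schema assumes a pydantic-v2 bridge (use .schema() on v1).
if __name__ == "__main__":
    from typing import Annotated

    def add(a: int, b: Annotated[int, "second addend"] = 0) -> int:
        return a + b

    AddSchema = create_schema_from_function("AddSchema", add)
    print(AddSchema.model_json_schema())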
|
from inspect import signature
from typing import (
Any,
Awaitable,
Callable,
List,
Optional,
Tuple,
Type,
Union,
cast,
get_origin,
get_args,
)
import typing
from llama_index.core.bridge.pydantic import BaseModel, FieldInfo, create_model
def create_schema_from_function(
name: str,
func: Union[Callable[..., Any], Callable[..., Awaitable[Any]]],
additional_fields: Optional[
List[Union[Tuple[str, Type, Any], Tuple[str, Type]]]
] = None,
ignore_fields: Optional[List[str]] = None,
) -> Type[BaseModel]:
"""Create schema from function."""
fields = {}
ignore_fields = ignore_fields or []
params = signature(func).parameters
for param_name in params:
if param_name in ignore_fields:
continue
param_type = params[param_name].annotation
param_default = params[param_name].default
description = None
if get_origin(param_type) is typing.Annotated:
args = get_args(param_type)
param_type = args[0]
if isinstance(args[1], str):
description = args[1]
if param_type is params[param_name].empty:
param_type = Any
if param_default is params[param_name].empty:
# Required field
fields[param_name] = (param_type, FieldInfo(description=description))
elif isinstance(param_default, FieldInfo):
# Field with pydantic.Field as default value
fields[param_name] = (param_type, param_default)
else:
fields[param_name] = (
param_type,
FieldInfo(default=param_default, description=description),
)
additional_fields = additional_fields or []
for field_info in additional_fields:
if len(field_info) == 3:
field_info = cast(Tuple[str, Type, Any], field_info)
field_name, field_type, field_default = field_info
fields[field_name] = (field_type, FieldInfo(default=field_default))
elif len(field_info) == 2:
# Required field has no default value
field_info = cast(Tuple[str, Type], field_info)
field_name, field_type = field_info
fields[field_name] = (field_type, FieldInfo())
else:
raise ValueError(
f"Invalid additional field info: {field_info}. "
"Must be a tuple of length 2 or 3."
)
return create_model(name, **fields) # type: ignore
|
import requests # type: ignore
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_qdrant import SparseEmbeddings, SparseVector
def qdrant_running_locally() -> bool:
"""Check if Qdrant is running at http://localhost:6333."""
try:
response = requests.get("http://localhost:6333", timeout=10.0)
response_json = response.json()
return response_json.get("title") == "qdrant - vector search engine"
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
return False
def assert_documents_equals(actual: list[Document], expected: list[Document]): # type: ignore[no-untyped-def]
assert len(actual) == len(expected)
for actual_doc, expected_doc in zip(actual, expected):
assert actual_doc.page_content == expected_doc.page_content
assert "_id" in actual_doc.metadata
assert "_collection_name" in actual_doc.metadata
actual_doc.metadata.pop("_id")
actual_doc.metadata.pop("_collection_name")
assert actual_doc.metadata == expected_doc.metadata
class ConsistentFakeEmbeddings(Embeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class ConsistentFakeSparseEmbeddings(SparseEmbeddings):
"""Fake sparse embeddings which remembers all the texts seen so far "
"to return consistent vectors for the same texts."""
def __init__(self, dimensionality: int = 25) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[SparseVector]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
index = self.known_texts.index(text)
indices = [i + index for i in range(self.dimensionality)]
values = [1.0] * (self.dimensionality - 1) + [float(index)]
out_vectors.append(SparseVector(indices=indices, values=values))
return out_vectors
def embed_query(self, text: str) -> SparseVector:
"""Return consistent embeddings for the text, "
"if seen before, or a constant one if the text is unknown."""
return self.embed_documents([text])[0]
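# Quick sketch (not part of the suite) of the determinism these fakes provide:
# the same text always maps to the same vector, so distance-based assertions
# in the integration tests are stable across calls.
if __name__ == "__main__":
    emb = ConsistentFakeEmbeddings(dimensionality=4)
    first = emb.embed_documents(["foo", "bar"])[0]
    again = emb.embed_query("foo")
    assert first == again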
|
from typing import List
import requests # type: ignore
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_qdrant import SparseEmbeddings, SparseVector
def qdrant_running_locally() -> bool:
"""Check if Qdrant is running at http://localhost:6333."""
try:
response = requests.get("http://localhost:6333", timeout=10.0)
response_json = response.json()
return response_json.get("title") == "qdrant - vector search engine"
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
return False
def assert_documents_equals(actual: List[Document], expected: List[Document]): # type: ignore[no-untyped-def]
assert len(actual) == len(expected)
for actual_doc, expected_doc in zip(actual, expected):
assert actual_doc.page_content == expected_doc.page_content
assert "_id" in actual_doc.metadata
assert "_collection_name" in actual_doc.metadata
actual_doc.metadata.pop("_id")
actual_doc.metadata.pop("_collection_name")
assert actual_doc.metadata == expected_doc.metadata
class ConsistentFakeEmbeddings(Embeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: List[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> List[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class ConsistentFakeSparseEmbeddings(SparseEmbeddings):
"""Fake sparse embeddings which remembers all the texts seen so far "
"to return consistent vectors for the same texts."""
def __init__(self, dimensionality: int = 25) -> None:
self.known_texts: List[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: List[str]) -> List[SparseVector]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
index = self.known_texts.index(text)
indices = [i + index for i in range(self.dimensionality)]
values = [1.0] * (self.dimensionality - 1) + [float(index)]
out_vectors.append(SparseVector(indices=indices, values=values))
return out_vectors
def embed_query(self, text: str) -> SparseVector:
"""Return consistent embeddings for the text, "
"if seen before, or a constant one if the text is unknown."""
return self.embed_documents([text])[0]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
from typing import List
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts: List[str] = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
# Few shot prompt (providing a few examples before asking model to complete more);
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
# Few shot prompt (providing a few examples before asking model to complete more);
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
It provides a simple search API that ranks documents by the number of
times the given query appears in each document.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast("str", item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
found_documents = []
for id_ in ids:
if id_ in self.store:
found_documents.append(self.store[id_])
return found_documents
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
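# Hedged usage sketch; assumes DocumentIndex exposes the retriever `invoke`
# API and uses the in-memory store defined above. Texts are illustrative.
if __name__ == "__main__":
    index = InMemoryDocumentIndex(top_k=1)
    index.upsert(
        [
            Document(page_content="hello hello world"),
            Document(page_content="goodbye"),
        ]
    )
    print(index.invoke("hello")[0].page_content)  # -> "hello hello world"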
|
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
It provides a simple search API that ranks documents by the number of
times the given query appears in each document.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast(str, item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
found_documents = []
for id_ in ids:
if id_ in self.store:
found_documents.append(self.store[id_])
return found_documents
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
|
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2._utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2._utils import has_all, has_any
from torchvision.transforms.v2.functional import to_pil_image
IMAGE = make_image(DEFAULT_SIZE, color_space="RGB")
BOUNDING_BOX = make_bounding_boxes(DEFAULT_SIZE, format=datapoints.BoundingBoxFormat.XYXY)
MASK = make_detection_mask(DEFAULT_SIZE)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor),
True,
),
(
(to_pil_image(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2.utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2.functional import to_pil_image
from torchvision.transforms.v2.utils import has_all, has_any
IMAGE = make_image(DEFAULT_SIZE, color_space="RGB")
BOUNDING_BOX = make_bounding_boxes(DEFAULT_SIZE, format=datapoints.BoundingBoxFormat.XYXY)
MASK = make_detection_mask(DEFAULT_SIZE)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_pure_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_pure_tensor),
True,
),
(
(to_pil_image(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_pure_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import ArrayType
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
    :param eps: a small jitter to avoid divide by zero
:return: np.ndarray with ndim=2
"""
return 1 - np.clip(
(np.dot(x_mat, y_mat.T) + eps)
/ (
np.outer(np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, axis=1)) + eps
),
-1,
1,
)
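# Quick sanity check (not part of the original module): identical rows have
# distance ~0, orthogonal rows have distance ~1:
#   >>> x = np.array([[1.0, 0.0], [0.0, 1.0]])
#   >>> np.round(cosine(x, x), 3)
#   array([[0., 1.],
#          [1., 0.]])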
def sqeuclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':
"""Squared Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return (
np.sum(y_mat**2, axis=1)
+ np.sum(x_mat**2, axis=1)[:, np.newaxis]
- 2 * np.dot(x_mat, y_mat.T)
)
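# Example (illustrative):
#   sqeuclidean(np.array([[0., 0.]]), np.array([[3., 4.]])) -> array([[25.]])
# and the euclidean() wrapper below would yield array([[5.]]).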
def sparse_cosine(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
from scipy.sparse.linalg import norm
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return 1 - np.clip(
np.asarray(
x_mat.dot(y_mat.T) / (np.outer(norm(x_mat, axis=1), norm(y_mat, axis=1)))
),
-1,
1,
)
def sparse_sqeuclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return np.asarray(
y_mat.power(2).sum(axis=1).flatten()
+ x_mat.power(2).sum(axis=1)
- 2 * x_mat.dot(y_mat.T)
)
def sparse_euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Sparse euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sparse_sqeuclidean(x_mat, y_mat))
def euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
    :param x_mat: np.ndarray with ndim=2
    :param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sqeuclidean(x_mat, y_mat))
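# The sparse_* variants accept scipy.sparse matrices and return dense
# np.ndarray results, e.g. (illustrative only):
#   from scipy.sparse import csr_matrix
#   x = csr_matrix(np.array([[1.0, 0.0]]))
#   sparse_cosine(x, x)        # -> array([[0.]])
#   sparse_sqeuclidean(x, x)   # -> array([[0.]])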
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import ArrayType
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
    :param eps: a small jitter to avoid divide by zero
:return: np.ndarray with ndim=2
"""
return 1 - np.clip(
(np.dot(x_mat, y_mat.T) + eps)
/ (
np.outer(np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, axis=1)) + eps
),
-1,
1,
)
def sqeuclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':
"""Squared Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return (
np.sum(y_mat**2, axis=1)
+ np.sum(x_mat**2, axis=1)[:, np.newaxis]
- 2 * np.dot(x_mat, y_mat.T)
)
def sparse_cosine(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
from scipy.sparse.linalg import norm
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return 1 - np.clip(
np.asarray(
x_mat.dot(y_mat.T) / (np.outer(norm(x_mat, axis=1), norm(y_mat, axis=1)))
),
-1,
1,
)
def sparse_sqeuclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return np.asarray(
y_mat.power(2).sum(axis=1).flatten()
+ x_mat.power(2).sum(axis=1)
- 2 * x_mat.dot(y_mat.T)
)
def sparse_euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Sparse euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sparse_sqeuclidean(x_mat, y_mat))
def euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
    :param x_mat: np.ndarray with ndim=2
    :param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sqeuclidean(x_mat, y_mat))
|
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.helper import random_port
def test_override_requests():
port = random_port()
class FooExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'foo called'
with Flow(port=port).add(
uses=FooExecutor, uses_requests={'/non_foo': 'foo', '/another_foo': 'foo'}
) as f:
c = Client(port=f.port)
resp1 = c.post(
on='/foo', inputs=DocumentArray([Document(text='')]), return_responses=True
)
resp2 = c.post(
on='/non_foo',
inputs=DocumentArray([Document(text='')]),
return_responses=True,
)
resp3 = c.post(
on='/another_foo',
inputs=DocumentArray([Document(text='')]),
return_responses=True,
)
assert resp1[0].docs[0].text == ''
assert resp2[0].docs[0].text == 'foo called'
assert resp3[0].docs[0].text == 'foo called'
def test_override_requests_uses_after():
port = random_port()
class FooExecutor(Executor):
@requests(on='/bar')
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'foo called'
class OtherExecutor(Executor):
@requests(on='/bar')
def bar(self, docs, **kwargs):
for doc in docs:
doc.text = 'bar called'
with Flow(port=port).add(
uses=FooExecutor,
uses_requests={'/foo': 'foo'},
shards=2,
polling='ANY',
uses_after=OtherExecutor,
uses_before=OtherExecutor,
) as f:
c = Client(port=f.port)
resp1 = c.post(
on='/foo', inputs=DocumentArray([Document(text='')]), return_responses=True
)
resp2 = c.post(
on='/non_foo',
inputs=DocumentArray([Document(text='')]),
return_responses=True,
)
resp3 = c.post(
on='/bar', inputs=DocumentArray([Document(text='')]), return_responses=True
)
assert resp1[0].docs[0].text == 'foo called'
assert resp2[0].docs[0].text == ''
assert resp3[0].docs[0].text == 'bar called'
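# Reading of the tests above: `uses_requests` remaps endpoint paths to
# executor methods at Flow construction time. In test_override_requests,
# '/non_foo' and '/another_foo' both dispatch to FooExecutor.foo, while the
# original '/foo' binding is replaced, so documents posted to '/foo' come
# back unchanged.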
|
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.helper import random_port
def test_override_requests():
port = random_port()
class FooExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'foo called'
with Flow(port=port).add(uses=FooExecutor, uses_requests={'/non_foo': 'foo'}) as f:
c = Client(port=f.port)
resp1 = c.post(
on='/foo', inputs=DocumentArray([Document(text='')]), return_responses=True
)
resp2 = c.post(
on='/non_foo',
inputs=DocumentArray([Document(text='')]),
return_responses=True,
)
assert resp1[0].docs[0].text == ''
assert resp2[0].docs[0].text == 'foo called'
def test_override_requests_uses_after():
port = random_port()
class FooExecutor(Executor):
@requests(on='/bar')
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'foo called'
class OtherExecutor(Executor):
@requests(on='/bar')
def bar(self, docs, **kwargs):
for doc in docs:
doc.text = 'bar called'
with Flow(port=port).add(
uses=FooExecutor,
uses_requests={'/foo': 'foo'},
shards=2,
polling='ANY',
uses_after=OtherExecutor,
uses_before=OtherExecutor,
) as f:
c = Client(port=f.port)
resp1 = c.post(
on='/foo', inputs=DocumentArray([Document(text='')]), return_responses=True
)
resp2 = c.post(
on='/non_foo',
inputs=DocumentArray([Document(text='')]),
return_responses=True,
)
resp3 = c.post(
on='/bar', inputs=DocumentArray([Document(text='')]), return_responses=True
)
assert resp1[0].docs[0].text == 'foo called'
assert resp2[0].docs[0].text == ''
assert resp3[0].docs[0].text == 'bar called'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_transformer import (
ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer)
from .deformable_detr_transformer import (
DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder, DeformableDetrTransformerEncoderLayer)
from .detr_transformer import (DetrTransformerDecoder,
DetrTransformerDecoderLayer,
DetrTransformerEncoder,
DetrTransformerEncoderLayer)
from .utils import (MLP, AdaptivePadding, DynamicConv, PatchEmbed,
PatchMerging, inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .deformable_detr_transformer import (
DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder, DeformableDetrTransformerEncoderLayer)
from .detr_transformer import (DetrTransformerDecoder,
DetrTransformerDecoderLayer,
DetrTransformerEncoder,
DetrTransformerEncoderLayer)
from .utils import (MLP, AdaptivePadding, DynamicConv, PatchEmbed,
PatchMerging, inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self, *args, **kwargs):
super(SparseRCNN, self).__init__(*args, **kwargs)
assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""Forward function of SparseR-CNN and QueryInst in train stage.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (List[Tensor], optional) : Segmentation masks for
each box. This is required to train QueryInst.
proposals (List[Tensor], optional): override rpn proposals with
custom proposals. Use when `with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert proposals is None, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.forward_train(x, img_metas)
roi_losses = self.roi_head.forward_train(
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_masks=gt_masks,
imgs_whwh=imgs_whwh)
return roi_losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, img_metas)
results = self.roi_head.simple_test(
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh=imgs_whwh,
rescale=rescale)
return results
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
# backbone
x = self.extract_feat(img)
# rpn
num_imgs = len(img)
dummy_img_metas = [
dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
]
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, dummy_img_metas)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
proposal_features,
dummy_img_metas)
return roi_outs
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self, *args, **kwargs):
super(SparseRCNN, self).__init__(*args, **kwargs)
assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""Forward function of SparseR-CNN and QueryInst in train stage.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (List[Tensor], optional) : Segmentation masks for
each box. This is required to train QueryInst.
proposals (List[Tensor], optional): override rpn proposals with
custom proposals. Use when `with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert proposals is None, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.forward_train(x, img_metas)
roi_losses = self.roi_head.forward_train(
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_masks=gt_masks,
imgs_whwh=imgs_whwh)
return roi_losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, img_metas)
results = self.roi_head.simple_test(
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh=imgs_whwh,
rescale=rescale)
return results
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
# backbone
x = self.extract_feat(img)
# rpn
num_imgs = len(img)
dummy_img_metas = [
dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
]
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, dummy_img_metas)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
proposal_features,
dummy_img_metas)
return roi_outs
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
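# Worked example (illustrative): with loss = torch.tensor([1., 2., 3.]),
# weight_reduce_loss(loss, reduction='mean', avg_factor=2) computes
# loss.sum() / (2 + eps) ≈ tensor(3.), rather than the plain mean of 2.0.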
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
    To use this decorator, the loss function must have a signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    a signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
    To use this decorator, the loss function must have a signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    a signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
    # torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
return torch.linalg.svdvals(x)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
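# Quick sanity check of the wrappers above (illustrative):
#   import torch
#   a = torch.tensor([[2.0, 0.0], [0.0, 4.0]])
#   inv(a) @ a                          # -> identity
#   solve(a, torch.tensor([2.0, 4.0]))  # -> tensor([1., 1.])
#   _, piv = lu_factor(a)               # pivots already shifted to 0-based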
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
    # torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
raise NotImplementedError(
"`compute_uv=False` is not supported for torch backend."
)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=image_size),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=4, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.04),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# cudnn_benchmark=True can accelerate fix-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=image_size),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=4, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.04),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# cudnn_benchmark=True can accelerate fix-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
|
_base_ = '../fast_rcnn/fast-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
# MMEngine support the following two ways, users can choose
# according to convenience
# train_dataloader = dict(dataset=dict(proposal_file='proposals/crpn_r50_caffe_fpn_1x_train2017.pkl')) # noqa
_base_.train_dataloader.dataset.proposal_file = 'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl' # noqa
# val_dataloader = dict(dataset=dict(proposal_file='proposals/crpn_r50_caffe_fpn_1x_val2017.pkl')) # noqa
# test_dataloader = val_dataloader
_base_.val_dataloader.dataset.proposal_file = 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl' # noqa
test_dataloader = _base_.val_dataloader
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
# TODO support proposals input
data = dict(
train=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType
from .single_stage import SingleStageDetector
@MODELS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file
path or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
            Defaults to None.
eval_teacher (bool): Set the train mode for teacher.
Defaults to True.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
"""
def __init__(
self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_config: Union[ConfigType, str, Path],
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = Config.fromfile(teacher_config)
self.teacher_model = MODELS.build(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(batch_inputs)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.loss(x, out_teacher, batch_data_samples)
return losses
def cuda(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
        to move the teacher model to cuda when calling the ``cuda`` function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def to(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
        to move the teacher model to the target device when calling the ``to``
        function."""
self.teacher_model.to(device=device)
return super().to(device=device)
def train(self, mode: bool = True) -> None:
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute, i.e. self.name = value
        This reloading prevents the teacher model from being registered as an
nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
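# Illustrative config usage (all paths below are placeholders, not taken
# from the repository):
#   model = dict(
#       type='KnowledgeDistillationSingleStageDetector',
#       backbone=..., neck=..., bbox_head=...,  # as for the student detector
#       teacher_config='path/to/teacher_config.py',
#       teacher_ckpt='path/to/teacher.pth',
#       eval_teacher=True)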
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file
path or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
            Defaults to None.
eval_teacher (bool): Set the train mode for teacher.
Defaults to True.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
"""
def __init__(
self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_config: Union[ConfigType, str, Path],
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = Config.fromfile(teacher_config)
self.teacher_model = MODELS.build(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(batch_inputs)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.loss(x, out_teacher, batch_data_samples)
return losses
def cuda(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
        to move the teacher model to cuda when calling the ``cuda`` function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def to(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
        to move the teacher model to the target device when calling the ``to``
        function."""
self.teacher_model.to(device=device)
return super().to(device=device)
def train(self, mode: bool = True) -> None:
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute, i.e. self.name = value
        This reloading prevents the teacher model from being registered as an
nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
|
_base_ = './cornernet_hourglass104_mstest_8x6_210e_coco.py'
train_dataloader = dict(batch_size=5)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (10 GPUs) x (5 samples per GPU)
auto_scale_lr = dict(base_batch_size=50)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=5,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (10 GPUs) x (5 samples per GPU)
auto_scale_lr = dict(base_batch_size=50)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `classes`
in `dataset.metainfo`."""
def _check_head(self, runner: Runner, mode: str) -> None:
"""Check whether the `num_classes` in head matches the length of
`classes` in `dataset.metainfo`.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
assert mode in ['train', 'val']
model = runner.model
dataset = runner.train_dataloader.dataset if mode == 'train' else \
runner.val_dataloader.dataset
if dataset.metainfo.get('classes', None) is None:
runner.logger.warning(
f'Please set `classes` '
                f'in the {dataset.__class__.__name__} `metainfo` and '
f'check if it is consistent with the `num_classes` '
f'of head')
else:
classes = dataset.metainfo['classes']
assert type(classes) is not str, \
                (f'`classes` in {dataset.__class__.__name__} '
                 f'should be a tuple of str. '
f'Add comma if number of classes is 1 as '
f'classes = ({classes},)')
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
for name, module in model.named_modules():
if hasattr(module, 'num_classes') and not name.endswith(
'rpn_head') and not isinstance(
module, (VGG, FusedSemanticHead)):
assert module.num_classes == len(classes), \
(f'The `num_classes` ({module.num_classes}) in '
f'{module.__class__.__name__} of '
                     f'{model.__class__.__name__} does not match '
                     f'the length of `classes` '
                     f'({len(classes)}) in '
f'{dataset.__class__.__name__}')
def before_train_epoch(self, runner: Runner) -> None:
"""Check whether the training dataset is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'train')
def before_val_epoch(self, runner: Runner) -> None:
"""Check whether the dataset in val epoch is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'val')
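# Typical registration in an MMEngine-style config (illustrative):
#   custom_hooks = [dict(type='NumClassCheckHook')]
# The hook then validates `num_classes` consistency before every train and
# val epoch via before_train_epoch/before_val_epoch.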
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `CLASSES`
in `dataset.metainfo`."""
def _check_head(self, runner: Runner, mode: str) -> None:
"""Check whether the `num_classes` in head matches the length of
`CLASSES` in `dataset.metainfo`.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
assert mode in ['train', 'val']
model = runner.model
dataset = runner.train_dataloader.dataset if mode == 'train' else \
runner.val_dataloader.dataset
if dataset.metainfo.get('CLASSES', None) is None:
runner.logger.warning(
f'Please set `CLASSES` '
                f'in the {dataset.__class__.__name__} `metainfo` and '
f'check if it is consistent with the `num_classes` '
f'of head')
else:
CLASSES = dataset.metainfo['CLASSES']
assert type(CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} '
                 f'should be a tuple of str. '
f'Add comma if number of classes is 1 as '
f'CLASSES = ({CLASSES},)')
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
for name, module in model.named_modules():
if hasattr(module, 'num_classes') and not name.endswith(
'rpn_head') and not isinstance(
module, (VGG, FusedSemanticHead)):
assert module.num_classes == len(CLASSES), \
(f'The `num_classes` ({module.num_classes}) in '
f'{module.__class__.__name__} of '
                     f'{model.__class__.__name__} does not match '
                     f'the length of `CLASSES` '
                     f'({len(CLASSES)}) in '
f'{dataset.__class__.__name__}')
def before_train_epoch(self, runner: Runner) -> None:
"""Check whether the training dataset is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'train')
def before_val_epoch(self, runner: Runner) -> None:
"""Check whether the dataset in val epoch is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'val')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
        draw_gt (bool): Whether to draw the ground truth. Defaults to True.
        draw_pred (bool): Whether to draw the predicted result.
            Defaults to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
import numpy as np
from jina import Flow, Document, DocumentArray
from ..simple_indexer import SimpleIndexer
def test_simple_indexer_flow(tmpdir):
f = Flow().add(
uses=SimpleIndexer,
override_with={'index_file_name': 'name'},
override_metas={'workspace': str(tmpdir)},
)
with f:
resp = f.post(
on='/index',
inputs=[Document(id='a', embedding=np.array([1]))],
return_results=True,
)
print(f'{resp}')
resp = f.post(
on='/search',
inputs=[Document(embedding=np.array([1]))],
return_results=True,
parameters={'top_k': 5},
)
assert resp[0].docs[0].matches[0].id == 'a'
def test_simple_indexer(tmpdir):
metas = {'workspace': str(tmpdir)}
indexer = SimpleIndexer(index_file_name='name', metas=metas)
assert indexer._flush
index_docs = DocumentArray([Document(id='a', embedding=np.array([1]))])
indexer.index(index_docs, {})
assert indexer._flush
    search_docs = DocumentArray([Document(embedding=np.array([1]))])
indexer.search(
docs=search_docs,
parameters={'top_k': 5},
)
assert not indexer._flush
assert search_docs[0].matches[0].id == 'a'
search_docs_id = DocumentArray([Document(id='a')])
assert search_docs_id[0].embedding is None
indexer.fill_embedding(search_docs_id)
assert search_docs_id[0].embedding is not None
|
import numpy as np
from jina import Flow, Document, DocumentArray
from .. import SimpleIndexer
def test_simple_indexer_flow(tmpdir):
f = Flow().add(
uses=SimpleIndexer,
override_with={'index_file_name': 'name'},
override_metas={'workspace': str(tmpdir)},
)
with f:
resp = f.post(
on='/index',
inputs=[Document(id='a', embedding=np.array([1]))],
return_results=True,
)
print(f'{resp}')
resp = f.post(
on='/search',
inputs=[Document(embedding=np.array([1]))],
return_results=True,
parameters={'top_k': 5},
)
assert resp[0].docs[0].matches[0].id == 'a'
def test_simple_indexer(tmpdir):
metas = {'workspace': str(tmpdir)}
indexer = SimpleIndexer(index_file_name='name', metas=metas)
assert indexer._flush
index_docs = DocumentArray([Document(id='a', embedding=np.array([1]))])
indexer.index(index_docs, {})
assert indexer._flush
    search_docs = DocumentArray([Document(embedding=np.array([1]))])
indexer.search(
docs=search_docs,
parameters={'top_k': 5},
)
assert not indexer._flush
assert search_docs[0].matches[0].id == 'a'
search_docs_id = DocumentArray([Document(id='a')])
assert search_docs_id[0].embedding is None
indexer.fill_embedding(search_docs_id)
assert search_docs_id[0].embedding is not None
|
from sentence_transformers import SentenceTransformer
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding
and some target sentence embedding.
    The MSE is computed between teacher.encode(source_sentences) and student.encode(target_sentences).
For multilingual knowledge distillation (https://arxiv.org/abs/2004.09813), source_sentences are in English
and target_sentences are in a different language like German, Chinese, Spanish...
:param source_sentences: Source sentences are embedded with the teacher model
    :param target_sentences: Target sentences are embedded with the student model.
:param show_progress_bar: Show progress bar when computing embeddings
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
source_sentences: List[str],
target_sentences: List[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
self.truncate_dim = truncate_dim
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
self.source_embeddings = teacher_model.encode(
source_sentences, show_progress_bar=show_progress_bar, batch_size=batch_size, convert_to_numpy=True
)
self.target_sentences = target_sentences
self.show_progress_bar = show_progress_bar
self.batch_size = batch_size
self.name = name
self.csv_file = "mse_evaluation_" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "MSE"]
self.write_csv = write_csv
def __call__(self, model: SentenceTransformer, output_path, epoch=-1, steps=-1):
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}"
else:
out_txt = f" in epoch {epoch} after {steps} steps"
else:
out_txt = ""
if self.truncate_dim is not None:
out_txt += f" (truncated to {self.truncate_dim})"
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
target_embeddings = model.encode(
self.target_sentences,
show_progress_bar=self.show_progress_bar,
batch_size=self.batch_size,
convert_to_numpy=True,
)
mse = ((self.source_embeddings - target_embeddings) ** 2).mean()
mse *= 100
logger.info(f"MSE evaluation (lower = better) on the {self.name} dataset{out_txt}:")
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mse])
return -mse # Return negative score as SentenceTransformers maximizes the performance
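# --- Illustrative usage sketch (assumptions flagged below) ---
# How the evaluator above is typically wired up for multilingual distillation.
# Both model names are illustrative, not mandated by this module.
if __name__ == '__main__':
    teacher = SentenceTransformer('all-MiniLM-L6-v2')
    student = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')
    evaluator = MSEEvaluator(
        source_sentences=['A cat sits on the mat.'],
        target_sentences=['Eine Katze sitzt auf der Matte.'],
        teacher_model=teacher,
        name='en-de',
    )
    # Returns -MSE*100, since SentenceTransformers maximizes evaluator scores.
    score = evaluator(student, output_path=None)
    print(score)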
|
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding
and some target sentence embedding.
    The MSE is computed between teacher.encode(source_sentences) and student.encode(target_sentences).
For multilingual knowledge distillation (https://arxiv.org/abs/2004.09813), source_sentences are in English
and target_sentences are in a different language like German, Chinese, Spanish...
:param source_sentences: Source sentences are embedded with the teacher model
    :param target_sentences: Target sentences are embedded with the student model.
:param show_progress_bar: Show progress bar when computing embeddings
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
source_sentences: List[str],
target_sentences: List[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
self.truncate_dim = truncate_dim
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
self.source_embeddings = teacher_model.encode(
source_sentences, show_progress_bar=show_progress_bar, batch_size=batch_size, convert_to_numpy=True
)
self.target_sentences = target_sentences
self.show_progress_bar = show_progress_bar
self.batch_size = batch_size
self.name = name
self.csv_file = "mse_evaluation_" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "MSE"]
self.write_csv = write_csv
def __call__(self, model, output_path, epoch=-1, steps=-1):
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}"
else:
out_txt = f" in epoch {epoch} after {steps} steps"
else:
out_txt = ""
if self.truncate_dim is not None:
out_txt += f" (truncated to {self.truncate_dim})"
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
target_embeddings = model.encode(
self.target_sentences,
show_progress_bar=self.show_progress_bar,
batch_size=self.batch_size,
convert_to_numpy=True,
)
mse = ((self.source_embeddings - target_embeddings) ** 2).mean()
mse *= 100
logger.info(f"MSE evaluation (lower = better) on the {self.name} dataset{out_txt}:")
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mse])
return -mse # Return negative score as SentenceTransformers maximizes the performance
|
# Copyright (c) OpenMMLab. All rights reserved.
from .res_layer import ResLayer
__all__ = ['ResLayer']
|
from .res_layer import ResLayer
__all__ = ['ResLayer']
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING: # pragma: no cover
from docarray.document import Document
class ChunkArray(DocumentArrayInMemory):
"""
:class:`ChunkArray` inherits from :class:`DocumentArray`.
It's a subset of Documents.
    :param docs: set of sub-documents (i.e. chunks) of `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
"""
        Construct a :class:`ChunkArray` from its sub-documents.
        :param docs: the sub-documents (i.e. chunks)
:param reference_doc: parent document
"""
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.parent_id = self._ref_doc.id
d.granularity = self._ref_doc.granularity + 1
def append(self, document: 'Document'):
"""Add a sub-document (i.e chunk) to the current Document.
:param document: Sub-document to be appended
.. note::
            Compared to :attr:`DocumentArray.append()`, this method adds extra
            safeguards to make sure the added chunk is valid.
"""
document.parent_id = self._ref_doc.id
document.granularity = self._ref_doc.granularity + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""
Get the document that :class:`ChunkArray` belongs to.
:return: reference doc
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""
        Get the granularity of all documents in this array.
:return: granularity
"""
return self._ref_doc.granularity + 1
@property
def adjacency(self) -> int:
"""
        Get the adjacency of all documents in this array.
:return: adjacency
"""
return self._ref_doc.adjacency
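# --- Illustrative usage sketch, assuming a docarray v1-style Document ---
# `Document.chunks` is a ChunkArray, so appending a chunk wires up parent_id
# and granularity exactly as implemented above.
if __name__ == '__main__':
    from docarray import Document

    root = Document(text='root')
    root.chunks.append(Document(text='a chunk'))
    assert root.chunks[0].parent_id == root.id
    assert root.chunks[0].granularity == root.granularity + 1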
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING:
from docarray.document import Document
class ChunkArray(DocumentArrayInMemory):
"""
:class:`ChunkArray` inherits from :class:`DocumentArray`.
It's a subset of Documents.
    :param docs: set of sub-documents (i.e. chunks) of `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
"""
        Construct a :class:`ChunkArray` from its sub-documents.
        :param docs: the sub-documents (i.e. chunks)
:param reference_doc: parent document
"""
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.parent_id = self._ref_doc.id
d.granularity = self._ref_doc.granularity + 1
def append(self, document: 'Document'):
"""Add a sub-document (i.e chunk) to the current Document.
:param document: Sub-document to be appended
.. note::
            Compared to :attr:`DocumentArray.append()`, this method adds extra
            safeguards to make sure the added chunk is valid.
"""
document.parent_id = self._ref_doc.id
document.granularity = self._ref_doc.granularity + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""
Get the document that :class:`ChunkArray` belongs to.
:return: reference doc
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""
        Get the granularity of all documents in this array.
:return: granularity
"""
return self._ref_doc.granularity + 1
@property
def adjacency(self) -> int:
"""
        Get the adjacency of all documents in this array.
:return: adjacency
"""
return self._ref_doc.adjacency
|
from typing import Any, Optional
import pytest
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.base import CallbackManagerForChainRun, Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
return {"bar": "bar"}
def test_stdoutcallback(capsys: pytest.CaptureFixture) -> Any:
"""Test the stdout callback handler."""
chain_test = FakeChain(callbacks=[StdOutCallbackHandler(color="red")])
chain_test.invoke({"foo": "bar"})
# Capture the output
captured = capsys.readouterr()
# Assert the output is as expected
assert captured.out == (
"\n\n\x1b[1m> Entering new FakeChain "
"chain...\x1b[0m\n\n\x1b[1m> Finished chain.\x1b[0m\n"
)
|
from typing import Any, Dict, List, Optional
import pytest
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.base import CallbackManagerForChainRun, Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
return {"bar": "bar"}
def test_stdoutcallback(capsys: pytest.CaptureFixture) -> Any:
"""Test the stdout callback handler."""
chain_test = FakeChain(callbacks=[StdOutCallbackHandler(color="red")])
chain_test.invoke({"foo": "bar"})
# Capture the output
captured = capsys.readouterr()
# Assert the output is as expected
assert captured.out == (
"\n\n\x1b[1m> Entering new FakeChain "
"chain...\x1b[0m\n\n\x1b[1m> Finished chain.\x1b[0m\n"
)
|
import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super(WeightedLayerPooling, self).__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: Dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # keep layers from layer_start onwards (default: the 4th layer's output)
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
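# --- Illustrative usage sketch ---
# Wiring WeightedLayerPooling into a SentenceTransformer. The module consumes
# `all_layer_embeddings`, which is only populated when the transformer returns
# hidden states; the model name and layer_start value are illustrative.
if __name__ == '__main__':
    from sentence_transformers import SentenceTransformer, models

    word_emb = models.Transformer(
        'bert-base-uncased', model_args={'output_hidden_states': True}
    )
    weighted = WeightedLayerPooling(
        word_emb.get_word_embedding_dimension(), num_hidden_layers=12, layer_start=9
    )
    pooling = models.Pooling(word_emb.get_word_embedding_dimension())
    model = SentenceTransformer(modules=[word_emb, weighted, pooling])
    print(model.encode(['weighted layer pooling demo']).shape)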
|
import torch
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class WeightedLayerPooling(nn.Module):
"""
    Token embeddings are a weighted mean of their different hidden layer representations.
"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super(WeightedLayerPooling, self).__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: Dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # keep layers from layer_start onwards (default: the 4th layer's output)
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
"""Init file of LlamaIndex."""
__version__ = "0.12.29"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
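# --- Illustrative usage sketch of the exports above ---
# The canonical three-step flow; it requires embedding/LLM backends configured
# via `Settings` and a local `data/` directory, so it is shown commented out.
# docs = SimpleDirectoryReader('data').load_data()
# index = VectorStoreIndex.from_documents(docs)
# answer = index.as_query_engine().query('What do these documents say?')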
|
"""Init file of LlamaIndex."""
__version__ = "0.12.28"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg, build_runner_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_runner_from_cfg'
]
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
    A PodFactory is a factory class abstracting Pod creation.
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
        :return: the created BasePod
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
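# --- Illustrative usage sketch ---
# Building a Pod from default CLI arguments. `set_pod_parser` is assumed from
# jina's argparse helpers; the exact import path can vary across versions.
# from jina.parsers import set_pod_parser
# pod = PodFactory.build_pod(set_pod_parser().parse_args([]))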
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
    A PodFactory is a factory class abstracting Pod creation.
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
        :return: the created BasePod
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
    Can be a remote (web) URL or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
                f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
        Load the data from the URL into a numpy.ndarray image tensor.
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
    Can be a remote (web) URL or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
                f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
        Load the data from the URL into a numpy.ndarray image tensor.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128; with in-batch negatives, every other sentence in a batch serves as a negative example
# Sentences are truncated to 75 word pieces
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-improved-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=1,
warmup_steps=100,
use_amp=True, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from datetime import datetime
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128; with in-batch negatives, every other sentence in a batch serves as a negative example
# Sentences are truncated to 75 word pieces
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-improved-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
warmup_steps=100,
use_amp=True, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
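# --- Illustrative note on config inheritance (not part of this config) ---
# mmengine merges this file on top of `_base_`, so only the keys above differ
# from the 1x schedule. A hedged load sketch, with an assumed on-disk path:
# from mmengine.config import Config
# cfg = Config.fromfile('configs/paa/paa_r50_fpn_mstrain_3x_coco.py')
# assert cfg.train_cfg.max_epochs == 36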
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
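# --- Illustrative round-trip sketch ---
# PILToTensor and ToPILImage preserve values (no 0-1 scaling); the tiny red
# image here is just a stand-in input.
if __name__ == '__main__':
    img = PIL.Image.new('RGB', (4, 4), color=(255, 0, 0))
    tensor = PILToTensor()(img)  # uint8 tensor of shape (3, 4, 4)
    assert tensor.shape == (3, 4, 4)
    back = ToPILImage()(tensor)  # back to a PIL image with the same values
    assert back.size == (4, 4)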
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImageTensor transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToImagePIL transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook', 'NaiveVisualizationHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook'
]
|
import gzip
from . import InputExample
class PairedFilesReader(object):
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
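# --- Illustrative usage sketch ---
# Line i of each file forms one positive pair. Throwaway files make the
# example self-contained; note that readline() keeps the trailing newline.
if __name__ == '__main__':
    import os
    import tempfile

    tmp = tempfile.mkdtemp()
    for name, lines in (('a.txt', ['hello\n', 'world\n']),
                        ('b.txt', ['hallo\n', 'welt\n'])):
        with open(os.path.join(tmp, name), 'w', encoding='utf-8') as f:
            f.writelines(lines)
    reader = PairedFilesReader([os.path.join(tmp, 'a.txt'),
                                os.path.join(tmp, 'b.txt')])
    examples = reader.get_examples()
    assert examples[0].texts == ['hello\n', 'hallo\n']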
|
from . import InputExample
import gzip
class PairedFilesReader(object):
"""
    Reads in a paired dataset that is split across two files
"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
""" """
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of:
- a [`VideoNdArray`][docarray.typing.VideoNdArray] (`VideoLoadResult.video`)
- an [`AudioNdArray`][docarray.typing.AudioNdArray] (`VideoLoadResult.audio`)
- an [`NdArray`][docarray.typing.NdArray] containing the key frame indices (`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a `VideoLoadResult` instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
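# --- Usage sketch (hedged): loads a video from local bytes instead of going
# through a VideoUrl; 'my_video.mp4' is a placeholder path and PyAV must be
# installed for load() to work.
if __name__ == "__main__":
    with open('my_video.mp4', 'rb') as f:
        video_bytes = VideoBytes(f.read())
    # load() decodes with PyAV; extra kwargs are forwarded to av.open()
    video, audio, key_frame_indices = video_bytes.load()
    print(video.shape, audio.shape, key_frame_indices.shape)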
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of a
VideoNdArray (`VideoLoadResult.video`), an AudioNdArray
(`VideoLoadResult.audio`) and an NdArray containing the key frame indices
(`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a VideoLoadResult instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN'
]
|
from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_cognitive_services.utils import (
detect_file_src_type,
download_audio_from_url,
)
logger = logging.getLogger(__name__)
class AzureCogsSpeech2TextTool(BaseTool):
"""Tool that queries the Azure Cognitive Services Speech2Text API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_region: str = "" #: :meta private:
speech_language: str = "en-US" #: :meta private:
speech_config: Any #: :meta private:
name: str = "azure_cognitive_services_speech2text"
description: str = (
"A wrapper around Azure Cognitive Services Speech2Text. "
"Useful for when you need to transcribe audio to text. "
"Input should be a url to an audio file."
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_region = get_from_dict_or_env(
values, "azure_cogs_region", "AZURE_COGS_REGION"
)
try:
import azure.cognitiveservices.speech as speechsdk
values["speech_config"] = speechsdk.SpeechConfig(
subscription=azure_cogs_key, region=azure_cogs_region
)
except ImportError:
raise ImportError(
"azure-cognitiveservices-speech is not installed. "
"Run `pip install azure-cognitiveservices-speech` to install."
)
return values
def _continuous_recognize(self, speech_recognizer: Any) -> str:
done = False
text = ""
def stop_cb(evt: Any) -> None:
"""callback that stop continuous recognition"""
speech_recognizer.stop_continuous_recognition_async()
nonlocal done
done = True
def retrieve_cb(evt: Any) -> None:
"""callback that retrieves the intermediate recognition results"""
nonlocal text
text += evt.result.text
# retrieve text on recognized events
speech_recognizer.recognized.connect(retrieve_cb)
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
speech_recognizer.start_continuous_recognition_async()
while not done:
time.sleep(0.5)
return text
def _speech2text(self, audio_path: str, speech_language: str) -> str:
try:
import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
audio_src_type = detect_file_src_type(audio_path)
if audio_src_type == "local":
audio_config = speechsdk.AudioConfig(filename=audio_path)
elif audio_src_type == "remote":
tmp_audio_path = download_audio_from_url(audio_path)
audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
else:
raise ValueError(f"Invalid audio path: {audio_path}")
self.speech_config.speech_recognition_language = speech_language
speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
return self._continuous_recognize(speech_recognizer)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
text = self._speech2text(query, self.speech_language)
return text
except Exception as e:
raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}")
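# --- Usage sketch (hedged): the key, region, and audio URL below are
# placeholders; alternatively, export AZURE_COGS_KEY and AZURE_COGS_REGION
# and construct the tool with no arguments.
if __name__ == "__main__":
    tool = AzureCogsSpeech2TextTool(
        azure_cogs_key="<your-key>", azure_cogs_region="<your-region>"
    )
    print(tool.run("https://example.com/sample.wav"))  # placeholder audio URL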
|
from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_cognitive_services.utils import (
detect_file_src_type,
download_audio_from_url,
)
logger = logging.getLogger(__name__)
class AzureCogsSpeech2TextTool(BaseTool): # type: ignore[override]
"""Tool that queries the Azure Cognitive Services Speech2Text API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_region: str = "" #: :meta private:
speech_language: str = "en-US" #: :meta private:
speech_config: Any #: :meta private:
name: str = "azure_cognitive_services_speech2text"
description: str = (
"A wrapper around Azure Cognitive Services Speech2Text. "
"Useful for when you need to transcribe audio to text. "
"Input should be a url to an audio file."
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_region = get_from_dict_or_env(
values, "azure_cogs_region", "AZURE_COGS_REGION"
)
try:
import azure.cognitiveservices.speech as speechsdk
values["speech_config"] = speechsdk.SpeechConfig(
subscription=azure_cogs_key, region=azure_cogs_region
)
except ImportError:
raise ImportError(
"azure-cognitiveservices-speech is not installed. "
"Run `pip install azure-cognitiveservices-speech` to install."
)
return values
def _continuous_recognize(self, speech_recognizer: Any) -> str:
done = False
text = ""
def stop_cb(evt: Any) -> None:
"""callback that stop continuous recognition"""
speech_recognizer.stop_continuous_recognition_async()
nonlocal done
done = True
def retrieve_cb(evt: Any) -> None:
"""callback that retrieves the intermediate recognition results"""
nonlocal text
text += evt.result.text
# retrieve text on recognized events
speech_recognizer.recognized.connect(retrieve_cb)
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
speech_recognizer.start_continuous_recognition_async()
while not done:
time.sleep(0.5)
return text
def _speech2text(self, audio_path: str, speech_language: str) -> str:
try:
import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
audio_src_type = detect_file_src_type(audio_path)
if audio_src_type == "local":
audio_config = speechsdk.AudioConfig(filename=audio_path)
elif audio_src_type == "remote":
tmp_audio_path = download_audio_from_url(audio_path)
audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
else:
raise ValueError(f"Invalid audio path: {audio_path}")
self.speech_config.speech_recognition_language = speech_language
speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
return self._continuous_recognize(speech_recognizer)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
text = self._speech2text(query, self.speech_language)
return text
except Exception as e:
raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}")
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
'sphinx_tabs.tabs',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
myst_enable_extensions = ['colon_fence']
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/zh_CN/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./cp_origin_docs.sh'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
myst_enable_extensions = ['colon_fence']
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/zh_CN/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./cp_origin_docs.sh'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class TestModel(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: Optional[str] = Field(
description="Additional fields",
default=None,
)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """```yaml
---
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """```yaml
action: update
action_input: The yamlOutputParser class is powerful
additional_fields: null
```"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline:\n",
)
@pytest.mark.parametrize("result", [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) -> None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel,
)
model = yaml_parser.parse(result)
print("parse_result:", result) # noqa: T201
assert model == DEF_EXPECTED_RESULT
def test_yaml_output_parser_fail() -> None:
"""Test YamlOutputParser where completion result fails schema validation."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel,
)
try:
yaml_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print("parse_result:", e) # noqa: T201
assert "Failed to parse TestModel from completion" in str(e)
else:
assert False, "Expected OutputParserException"
def test_yaml_output_parser_output_type() -> None:
"""Test YamlOutputParser OutputType."""
yaml_parser = YamlOutputParser(pydantic_object=TestModel)
assert yaml_parser.OutputType is TestModel
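# --- Usage sketch (hedged): how the parser is typically wired to a prompt;
# get_format_instructions() comes from the output-parser base interface.
if __name__ == "__main__":
    parser: YamlOutputParser[TestModel] = YamlOutputParser(pydantic_object=TestModel)
    print(parser.get_format_instructions())  # schema text to embed in the prompt
    parsed = parser.parse(DEF_RESULT)  # a validated TestModel instance
    assert parsed.action is Actions.UPDATE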
|
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class TestModel(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: Optional[str] = Field(
description="Additional fields", default=None
)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """```yaml
---
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """```yaml
action: update
action_input: The yamlOutputParser class is powerful
additional_fields: null
```"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline:\n",
)
@pytest.mark.parametrize("result", [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) -> None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
model = yaml_parser.parse(result)
print("parse_result:", result) # noqa: T201
assert model == DEF_EXPECTED_RESULT
def test_yaml_output_parser_fail() -> None:
"""Test YamlOutputParser where completion result fails schema validation."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
try:
yaml_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print("parse_result:", e) # noqa: T201
assert "Failed to parse TestModel from completion" in str(e)
else:
assert False, "Expected OutputParserException"
def test_yaml_output_parser_output_type() -> None:
"""Test YamlOutputParser OutputType."""
yaml_parser = YamlOutputParser(pydantic_object=TestModel)
assert yaml_parser.OutputType is TestModel
|
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredEPubLoader(UnstructuredFileLoader):
"""Load `EPub` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredEPubLoader
loader = UnstructuredEPubLoader(
"example.epub", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-epub
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the EPub file to load.
            mode: The mode to use when loading the file. Can be one of "single"
                or "elements". Default is "single".
            **unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
validate_unstructured_version("0.5.4")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.epub import partition_epub
return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
|
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredEPubLoader(UnstructuredFileLoader):
"""Load `EPub` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredEPubLoader
loader = UnstructuredEPubLoader(
"example.epub", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-epub
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the EPub file to load.
            mode: The mode to use when loading the file. Can be one of "single"
                or "elements". Default is "single".
            **unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
validate_unstructured_version("0.5.4")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.epub import partition_epub
return partition_epub(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .cascade_rcnn import CascadeRCNN
@MODELS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .cascade_rcnn import CascadeRCNN
@MODELS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs):
super(SCNet, self).__init__(**kwargs)
|
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": features.Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
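# --- Usage sketch (hedged): consuming the dataset through the prototype
# registry; assumes torchvision.prototype.datasets.load() is available,
# h5py is installed, and the GDrive resources can be downloaded.
if __name__ == "__main__":
    from torchvision.prototype import datasets

    dataset = datasets.load("pcam", split="val")  # "pcam" is NAME above
    sample = next(iter(dataset))
    print(sample["image"].shape, sample["label"])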
|
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": features.Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
|
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
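# --- Usage sketch (hedged): two toy evaluators and an averaging
# main_score_function; "all-MiniLM-L6-v2" is just a placeholder checkpoint.
if __name__ == "__main__":
    from sentence_transformers import SentenceTransformer

    class ConstantEvaluator(SentenceEvaluator):
        """Toy evaluator that returns a fixed score, for illustration only."""

        def __init__(self, score: float) -> None:
            super().__init__()
            self.score = score

        def __call__(self, model, output_path=None, epoch=-1, steps=-1) -> float:
            return self.score

    model = SentenceTransformer("all-MiniLM-L6-v2")
    seq_evaluator = SequentialEvaluator(
        [ConstantEvaluator(0.5), ConstantEvaluator(0.9)],
        main_score_function=lambda scores: sum(scores) / len(scores),
    )
    results = seq_evaluator(model)
    print(results["sequential_score"])  # 0.7 with the averaging function above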
|
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
"""
This tool allows agents to interact with the clickup library
and operate on a Clickup instance.
To use this tool, you must first set the following environment variables:
client_secret
client_id
code
Below is a sample script that uses the Clickup tool:
```python
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper
clickup = ClickupAPIWrapper()
toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup)
```
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.clickup import ClickupAPIWrapper
class ClickupAction(BaseTool):
"""Tool that queries the Clickup API."""
api_wrapper: ClickupAPIWrapper = Field(default_factory=ClickupAPIWrapper)
mode: str
name: str = ""
description: str = ""
def _run(
self,
instructions: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Clickup API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
|
"""
This tool allows agents to interact with the clickup library
and operate on a Clickup instance.
To use this tool, you must first set the following environment variables:
client_secret
client_id
code
Below is a sample script that uses the Clickup tool:
```python
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper
clickup = ClickupAPIWrapper()
toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup)
```
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.clickup import ClickupAPIWrapper
class ClickupAction(BaseTool): # type: ignore[override]
"""Tool that queries the Clickup API."""
api_wrapper: ClickupAPIWrapper = Field(default_factory=ClickupAPIWrapper)
mode: str
name: str = ""
description: str = ""
def _run(
self,
instructions: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Clickup API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1.0 / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
# %%
# Compute paths
# -------------
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
coefs = []
for a in alphas:
ridge = linear_model.Ridge(alpha=a, fit_intercept=False)
ridge.fit(X, y)
coefs.append(ridge.coef_)
# %%
# Display results
# ---------------
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale("log")
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel("alpha")
plt.ylabel("weights")
plt.title("Ridge Coefficients vs Regularization Strength (alpha)")
plt.axis("tight")
plt.legend(
[f"Feature {i + 1}" for i in range(X.shape[1])], loc="best", fontsize="small"
)
plt.show()
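# --- Concreteness check (hedged): the 10x10 Hilbert matrix built above is
# famously ill-conditioned; its condition number is on the order of 1e13,
# which is why tiny target changes can swing the unregularized weights.
print(f"cond(X) = {np.linalg.cond(X):.2e}")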
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1.0 / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
# %%
# Compute paths
# -------------
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
coefs = []
for a in alphas:
ridge = linear_model.Ridge(alpha=a, fit_intercept=False)
ridge.fit(X, y)
coefs.append(ridge.coef_)
# %%
# Display results
# ---------------
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale("log")
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel("alpha")
plt.ylabel("weights")
plt.title("Ridge coefficients as a function of the regularization")
plt.axis("tight")
plt.show()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from jina.clients.request import request_generator
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyFastAPIGateway(FastAPIBaseGateway):
def __init__(
self,
        arg1: Optional[str] = None,
        arg2: Optional[str] = None,
arg3: str = 'default-arg3',
default_health_check: bool = False,
**kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.default_health_check = default_health_check
@property
def app(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
if not self.default_health_check:
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
return app
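# --- Usage sketch (hedged): serving the gateway from a Jina Flow; assumes
# this Jina version supports custom gateways via config_gateway(), and the
# port is arbitrary.
if __name__ == "__main__":
    from jina import Flow

    flow = Flow().config_gateway(
        uses=DummyFastAPIGateway,
        uses_with={'arg1': 'hello'},
        protocol='http',
        port=12345,
    )
    with flow:
        flow.block()  # serves GET / and GET /stream until interrupted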
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from jina.clients.request import request_generator
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyFastAPIGateway(FastAPIBaseGateway):
def __init__(
        self, arg1: Optional[str] = None, arg2: Optional[str] = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
@property
def app(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
return app
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='Display interval between frames in seconds; 0 blocks until a key press')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
model.cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in track_iter_progress(video_reader):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='Display interval between frames in seconds; 0 blocks until a key press')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
model.cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in track_iter_progress(video_reader):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_lumina2 import Lumina2Transformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
"""Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
# create the top-level parser
urls = {
'Code': ('💻', 'https://oss.jina.ai'),
'Docs': ('📖', 'https://jina.ai/serve'),
'Help': ('💬', 'https://discord.jina.ai'),
'Hiring!': ('🙌', 'https://jobs.jina.ai'),
}
url_str = '\n'.join(
f'- {v[0]:<10} {k:10.10}\t{colored(v[1], "cyan", attrs=["underline"])}'
for k, v in urls.items()
)
parser = argparse.ArgumentParser(
epilog=f'''
Jina v{colored(__version__, "green")}: Build multimodal AI services via cloud native technologies.
{url_str}
''',
formatter_class=_chf,
)
parser.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help='Show Jina version',
)
parser.add_argument(
'-vf',
'--version-full',
action='version',
version=format_full_version_info(*get_full_version()),
help='Show Jina and all dependencies\' versions',
)
return parser
|
"""Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
# create the top-level parser
urls = {
'Code': ('💻', 'https://oss.jina.ai'),
'Docs': ('📖', 'https://docs.jina.ai'),
'Help': ('💬', 'https://discord.jina.ai'),
'Hiring!': ('🙌', 'https://jobs.jina.ai'),
}
url_str = '\n'.join(
f'- {v[0]:<10} {k:10.10}\t{colored(v[1], "cyan", attrs=["underline"])}'
for k, v in urls.items()
)
parser = argparse.ArgumentParser(
epilog=f'''
Jina v{colored(__version__, "green")}: Build multimodal AI services via cloud native technologies.
{url_str}
''',
formatter_class=_chf,
)
parser.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help='Show Jina version',
)
parser.add_argument(
'-vf',
'--version-full',
action='version',
version=format_full_version_info(*get_full_version()),
help='Show Jina and all dependencies\' versions',
)
return parser
|
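A quick way to exercise the parser above; the import path is an assumption based on the module docstring.

# a hedged sketch: build the base parser and print its help, including the epilog
from jina.parsers.base import set_base_parser

parser = set_base_parser()
parser.print_help()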
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def populate_database():
import glob
import json
import pathlib
import requests
import market.model
templates = pathlib.Path(__file__).parent.parent / "graph_templates"
all_files = glob.glob(str(templates / "*.json"))
for file in all_files:
with open(file, "r") as f:
data = f.read()
req = market.model.AddAgentRequest(
graph=json.loads(data),
author="Populate DB",
categories=["Pre-Populated"],
keywords=["test"],
)
response = requests.post(
"http://localhost:8015/api/v1/market/admin/agent", json=req.model_dump()
)
print(response.text)
def format():
run("ruff", "check", "--fix", ".")
run("isort", "--profile", "black", ".")
run("black", ".")
run("pyright", ".")
def app():
port = os.getenv("PORT", "8015")
run("uvicorn", "market.app:app", "--reload", "--port", port, "--host", "0.0.0.0")
def setup():
run("prisma", "generate")
run("prisma", "migrate", "deploy")
|
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def populate_database():
import glob
import json
import pathlib
import requests
import market.model
templates = pathlib.Path(__file__).parent.parent / "backend" / "graph_templates"
all_files = glob.glob(str(templates / "*.json"))
for file in all_files:
with open(file, "r") as f:
data = f.read()
req = market.model.AddAgentRequest(
graph=json.loads(data),
author="Populate DB",
categories=["Pre-Populated"],
keywords=["test"],
)
response = requests.post(
"http://localhost:8015/api/v1/market/admin/agent", json=req.model_dump()
)
print(response.text)
def format():
run("ruff", "check", "--fix", ".")
run("isort", "--profile", "black", ".")
run("black", ".")
run("pyright", ".")
def app():
port = os.getenv("PORT", "8015")
run("uvicorn", "market.app:app", "--reload", "--port", port, "--host", "0.0.0.0")
def setup():
run("prisma", "generate")
run("prisma", "migrate", "deploy")
|
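The functions above are thin wrappers meant to be wired up as Poetry script entry points; the same pattern in isolation looks like the sketch below (the pyproject.toml mapping, e.g. lint = "market.cli:lint", is an assumption).

# a hedged, self-contained sketch of the `poetry run` wrapper pattern
import subprocess

def run(*command: str) -> None:
    # prefix the command with `poetry run` so it executes in the project's virtualenv
    subprocess.run(["poetry", "run", *command], check=True)

run("pytest", "-q")  # executes `poetry run pytest -q`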
"""Test Ollama Chat API wrapper."""
from typing import Any
from unittest.mock import patch
from langchain_ollama import OllamaLLM
MODEL_NAME = "llama3.1"
def test_initialization() -> None:
"""Test integration initialization."""
OllamaLLM(model="llama3")
def test_model_params() -> None:
# Test standard tracing params
llm = OllamaLLM(model="llama3")
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": "llama3",
}
llm = OllamaLLM(model="llama3", num_predict=3)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": "llama3",
"ls_max_tokens": 3,
}
@patch("langchain_ollama.llms.validate_model")
def test_validate_model_on_init(mock_validate_model: Any) -> None:
"""Test that the model is validated on initialization when requested."""
# Test that validate_model is called when validate_model_on_init=True
OllamaLLM(model=MODEL_NAME, validate_model_on_init=True)
mock_validate_model.assert_called_once()
mock_validate_model.reset_mock()
# Test that validate_model is NOT called when validate_model_on_init=False
OllamaLLM(model=MODEL_NAME, validate_model_on_init=False)
mock_validate_model.assert_not_called()
# Test that validate_model is NOT called by default
OllamaLLM(model=MODEL_NAME)
mock_validate_model.assert_not_called()
|
"""Test Ollama Chat API wrapper."""
from langchain_ollama import OllamaLLM
def test_initialization() -> None:
"""Test integration initialization."""
OllamaLLM(model="llama3")
def test_model_params() -> None:
# Test standard tracing params
llm = OllamaLLM(model="llama3")
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": "llama3",
}
llm = OllamaLLM(model="llama3", num_predict=3)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": "llama3",
"ls_max_tokens": 3,
}
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseMarginMSELoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseMarginMSELoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful if you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
    # Load a large dataset in streaming mode. More info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful if you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
    # Load a large dataset in streaming mode. More info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
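start_multi_process_pool also accepts an explicit device list, which is useful when only some GPUs (or only CPU workers) should be used; a sketch:

# a hedged sketch: restrict the pool to two named devices instead of all visible GPUs
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
pool = model.start_multi_process_pool(["cuda:0", "cuda:1"])  # or ["cpu"] * 4 on CPU-only machines
emb = model.encode_multi_process(["hello", "world"], pool, batch_size=2)
model.stop_multi_process_pool(pool)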
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticV7DocIndex
from tests.index.elastic.fixture import start_storage_v7 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config():
class MyDoc(BaseDoc):
text: str
color: str = Field(col_type='keyword')
index = ElasticV7DocIndex[MyDoc]()
index_docs = [
MyDoc(id='0', text='hello world', color='red'),
MyDoc(id='1', text='never gonna give you up', color='blue'),
MyDoc(id='2', text='we are the world', color='green'),
]
index.index(index_docs)
query = 'world'
docs, _ = index.text_search(query, search_field='text')
assert [doc.id for doc in docs] == ['0', '2']
filter_query = {'terms': {'color': ['red', 'blue']}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == ['0', '1']
def test_field_object():
class MyDoc(BaseDoc):
manager: dict = Field(
properties={
'age': {'type': 'integer'},
'name': {
'properties': {
'first': {'type': 'keyword'},
'last': {'type': 'keyword'},
}
},
}
)
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(manager={'age': 25, 'name': {'first': 'Rachel', 'last': 'Green'}}),
MyDoc(manager={'age': 30, 'name': {'first': 'Monica', 'last': 'Geller'}}),
MyDoc(manager={'age': 35, 'name': {'first': 'Phoebe', 'last': 'Buffay'}}),
]
index.index(doc)
id_ = doc[0].id
assert index[id_].id == id_
assert index[id_].manager == doc[0].manager
filter_query = {'range': {'manager.age': {'gte': 30}}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == [doc[1].id, doc[2].id]
def test_field_geo_point():
class MyDoc(BaseDoc):
location: dict = Field(col_type='geo_point')
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(location={'lat': 40.12, 'lon': -72.34}),
MyDoc(location={'lat': 41.12, 'lon': -73.34}),
MyDoc(location={'lat': 42.12, 'lon': -74.34}),
]
index.index(doc)
query = {
'query': {
'geo_bounding_box': {
'location': {
'top_left': {'lat': 42, 'lon': -74},
'bottom_right': {'lat': 40, 'lon': -72},
}
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_field_range():
class MyDoc(BaseDoc):
expected_attendees: dict = Field(col_type='integer_range')
time_frame: dict = Field(col_type='date_range', format='yyyy-MM-dd')
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(
expected_attendees={'gte': 10, 'lt': 20},
time_frame={'gte': '2023-01-01', 'lt': '2023-02-01'},
),
MyDoc(
expected_attendees={'gte': 20, 'lt': 30},
time_frame={'gte': '2023-02-01', 'lt': '2023-03-01'},
),
MyDoc(
expected_attendees={'gte': 30, 'lt': 40},
time_frame={'gte': '2023-03-01', 'lt': '2023-04-01'},
),
]
index.index(doc)
query = {
'query': {
'bool': {
'should': [
{'term': {'expected_attendees': {'value': 15}}},
{
'range': {
'time_frame': {
'gte': '2023-02-05',
'lt': '2023-02-10',
'relation': 'contains',
}
}
},
]
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_index_name():
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type='text')
index = ElasticV7DocIndex[TextDoc]()
assert index.index_name == TextDoc.__name__.lower()
index = ElasticV7DocIndex[StringDoc]()
assert index.index_name == StringDoc.__name__.lower()
|
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticV7DocIndex
from tests.index.elastic.fixture import start_storage_v7 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config():
class MyDoc(BaseDoc):
text: str
color: str = Field(col_type='keyword')
index = ElasticV7DocIndex[MyDoc]()
index_docs = [
MyDoc(id='0', text='hello world', color='red'),
MyDoc(id='1', text='never gonna give you up', color='blue'),
MyDoc(id='2', text='we are the world', color='green'),
]
index.index(index_docs)
query = 'world'
docs, _ = index.text_search(query, search_field='text')
assert [doc.id for doc in docs] == ['0', '2']
filter_query = {'terms': {'color': ['red', 'blue']}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == ['0', '1']
def test_field_object():
class MyDoc(BaseDoc):
manager: dict = Field(
properties={
'age': {'type': 'integer'},
'name': {
'properties': {
'first': {'type': 'keyword'},
'last': {'type': 'keyword'},
}
},
}
)
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(manager={'age': 25, 'name': {'first': 'Rachel', 'last': 'Green'}}),
MyDoc(manager={'age': 30, 'name': {'first': 'Monica', 'last': 'Geller'}}),
MyDoc(manager={'age': 35, 'name': {'first': 'Phoebe', 'last': 'Buffay'}}),
]
index.index(doc)
id_ = doc[0].id
assert index[id_].id == id_
assert index[id_].manager == doc[0].manager
filter_query = {'range': {'manager.age': {'gte': 30}}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == [doc[1].id, doc[2].id]
def test_field_geo_point():
class MyDoc(BaseDoc):
location: dict = Field(col_type='geo_point')
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(location={'lat': 40.12, 'lon': -72.34}),
MyDoc(location={'lat': 41.12, 'lon': -73.34}),
MyDoc(location={'lat': 42.12, 'lon': -74.34}),
]
index.index(doc)
query = {
'query': {
'geo_bounding_box': {
'location': {
'top_left': {'lat': 42, 'lon': -74},
'bottom_right': {'lat': 40, 'lon': -72},
}
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_field_range():
class MyDoc(BaseDoc):
expected_attendees: dict = Field(col_type='integer_range')
time_frame: dict = Field(col_type='date_range', format='yyyy-MM-dd')
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(
expected_attendees={'gte': 10, 'lt': 20},
time_frame={'gte': '2023-01-01', 'lt': '2023-02-01'},
),
MyDoc(
expected_attendees={'gte': 20, 'lt': 30},
time_frame={'gte': '2023-02-01', 'lt': '2023-03-01'},
),
MyDoc(
expected_attendees={'gte': 30, 'lt': 40},
time_frame={'gte': '2023-03-01', 'lt': '2023-04-01'},
),
]
index.index(doc)
query = {
'query': {
'bool': {
'should': [
{'term': {'expected_attendees': {'value': 15}}},
{
'range': {
'time_frame': {
'gte': '2023-02-05',
'lt': '2023-02-10',
'relation': 'contains',
}
}
},
]
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
|
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data."""
def load_uri_to_video_tensor(self: 'T', only_keyframes: bool = False) -> 'T':
"""Convert a :attr:`.uri` to a video ndarray :attr:`.tensor`.
:param only_keyframes: only keep the keyframes in the video
        :return: the Document itself after processing
"""
import av
with av.open(self.uri) as container:
if only_keyframes:
stream = container.streams.video[0]
stream.codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
img = frame.to_image()
frames.append(np.asarray(img))
self.tensor = np.moveaxis(np.stack(frames), 1, 2)
return self
def save_video_tensor_to_file(
self: 'T', file: Union[str, BinaryIO], frame_rate: int = 30, codec: str = 'h264'
) -> 'T':
"""Save :attr:`.tensor` as a video mp4/h264 file.
:param file: The file to open, which can be either a string or a file-like object.
:param frame_rate: frames per second
:param codec: the name of a decoder/encoder
        :return: itself after processing
"""
if (
self.tensor.ndim != 4
or self.tensor.shape[-1] != 3
or self.tensor.dtype != np.uint8
):
raise ValueError(
                f'expects `.tensor` with dtype=uint8, ndim=4 and a last dimension of 3, '
                f'but received {self.tensor.shape} with dtype {self.tensor.dtype}'
)
video_tensor = np.moveaxis(np.clip(self.tensor, 0, 255), 1, 2)
import av
with av.open(file, mode='w') as container:
stream = container.add_stream(codec, rate=frame_rate)
stream.width = self.tensor.shape[1]
stream.height = self.tensor.shape[2]
stream.pix_fmt = 'yuv420p'
for b in video_tensor:
frame = av.VideoFrame.from_ndarray(b, format='rgb24')
for packet in stream.encode(frame):
container.mux(packet)
for packet in stream.encode():
container.mux(packet)
return self
|
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...typing import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data."""
def load_uri_to_video_tensor(self: 'T', only_keyframes: bool = False) -> 'T':
"""Convert a :attr:`.uri` to a video ndarray :attr:`.tensor`.
:param only_keyframes: only keep the keyframes in the video
        :return: the Document itself after processing
"""
import av
with av.open(self.uri) as container:
if only_keyframes:
stream = container.streams.video[0]
stream.codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
img = frame.to_image()
frames.append(np.asarray(img))
self.tensor = np.moveaxis(np.stack(frames), 1, 2)
return self
def save_video_tensor_to_file(
self: 'T', file: Union[str, BinaryIO], frame_rate: int = 30, codec: str = 'h264'
) -> 'T':
"""Save :attr:`.tensor` as a video mp4/h264 file.
:param file: The file to open, which can be either a string or a file-like object.
:param frame_rate: frames per second
:param codec: the name of a decoder/encoder
        :return: itself after processing
"""
if (
self.tensor.ndim != 4
or self.tensor.shape[-1] != 3
or self.tensor.dtype != np.uint8
):
raise ValueError(
                f'expects `.tensor` with dtype=uint8, ndim=4 and a last dimension of 3, '
                f'but received {self.tensor.shape} with dtype {self.tensor.dtype}'
)
video_tensor = np.moveaxis(np.clip(self.tensor, 0, 255), 1, 2)
import av
with av.open(file, mode='w') as container:
stream = container.add_stream(codec, rate=frame_rate)
stream.width = self.tensor.shape[1]
stream.height = self.tensor.shape[2]
stream.pix_fmt = 'yuv420p'
for b in video_tensor:
frame = av.VideoFrame.from_ndarray(b, format='rgb24')
for packet in stream.encode(frame):
container.mux(packet)
for packet in stream.encode():
container.mux(packet)
return self
|
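As used from a docarray v1 Document, which mixes in the class above; the video path is a placeholder.

# a hedged sketch of the mixin in use
from docarray import Document

doc = Document(uri='input.mp4')           # placeholder path to a local video
doc.load_uri_to_video_tensor()            # fills doc.tensor with a uint8 frame stack
doc.save_video_tensor_to_file('out.mp4')  # re-encodes the tensor as h264 at 30 fps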
from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
from logging import getLogger
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = getLogger(__name__)
def get_user_agent() -> str:
"""Get user agent for Elasticsearch client."""
import llama_index.core
version = getattr(llama_index.core, "__version__", "")
return f"llama_index-py-vs/{version}"
def get_elasticsearch_client(
url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> AsyncElasticsearch:
if url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if url:
connection_params["hosts"] = [url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = Elasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
async_es_client = AsyncElasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
    sync_es_client.info()  # use the sync client so we don't have to 'await' just to fetch cluster info
return async_es_client
def convert_es_hit_to_node(
hit: Dict[str, Any], text_field: str = "content"
) -> BaseNode:
"""
Convert an Elasticsearch search hit to a BaseNode.
Args:
hit: The Elasticsearch search hit
text_field: The field name that contains the text content
Returns:
BaseNode: The converted node
"""
source = hit.get("_source", {})
metadata = source.get("metadata", {})
text = source.get(text_field, None)
node_id = hit.get("_id")
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(f"Could not parse metadata from hit {source.get('metadata')}")
node_info = source.get("node_info")
relationships = source.get("relationships", {})
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
return node
|
from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
from logging import getLogger
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = getLogger(__name__)
def get_user_agent() -> str:
"""Get user agent for Elasticsearch client."""
import llama_index.core
version = getattr(llama_index.core, "__version__", "")
return f"llama_index-py-vs/{version}"
def get_elasticsearch_client(
url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> AsyncElasticsearch:
if url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if url:
connection_params["hosts"] = [url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = Elasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
async_es_client = AsyncElasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
    sync_es_client.info()  # use the sync client so we don't have to 'await' just to fetch cluster info
return async_es_client
def convert_es_hit_to_node(
hit: Dict[str, Any], text_field: str = "content"
) -> BaseNode:
"""
Convert an Elasticsearch search hit to a BaseNode.
Args:
hit: The Elasticsearch search hit
text_field: The field name that contains the text content
Returns:
BaseNode: The converted node
"""
source = hit.get("_source", {})
metadata = source.get("metadata", {})
text = source.get(text_field, None)
node_id = hit.get("_id")
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(f"Could not parse metadata from hit {source.get('metadata')}")
node_info = source.get("node_info")
relationships = source.get("relationships", {})
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
return node
|
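Calling the helper above looks like this; the URL and credentials are placeholders, and note that the function pings the cluster synchronously before returning the async client.

# a hedged sketch: get_elasticsearch_client is the helper defined above
client = get_elasticsearch_client(
    url="http://localhost:9200",  # assumption: a locally reachable cluster
    username="elastic",
    password="changeme",
)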
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Redis
from langchain_community.vectorstores.redis.base import RedisVectorStoreRetriever
from langchain_community.vectorstores.redis.filters import (
RedisFilter,
RedisNum,
RedisTag,
RedisText,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Redis": "langchain_community.vectorstores",
"RedisFilter": "langchain_community.vectorstores.redis.filters",
"RedisTag": "langchain_community.vectorstores.redis.filters",
"RedisText": "langchain_community.vectorstores.redis.filters",
"RedisNum": "langchain_community.vectorstores.redis.filters",
"RedisVectorStoreRetriever": "langchain_community.vectorstores.redis.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Redis",
"RedisFilter",
"RedisNum",
"RedisTag",
"RedisText",
"RedisVectorStoreRetriever",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Redis
from langchain_community.vectorstores.redis.base import RedisVectorStoreRetriever
from langchain_community.vectorstores.redis.filters import (
RedisFilter,
RedisNum,
RedisTag,
RedisText,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Redis": "langchain_community.vectorstores",
"RedisFilter": "langchain_community.vectorstores.redis.filters",
"RedisTag": "langchain_community.vectorstores.redis.filters",
"RedisText": "langchain_community.vectorstores.redis.filters",
"RedisNum": "langchain_community.vectorstores.redis.filters",
"RedisVectorStoreRetriever": "langchain_community.vectorstores.redis.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Redis",
"RedisFilter",
"RedisTag",
"RedisText",
"RedisNum",
"RedisVectorStoreRetriever",
]
|
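The effect of the shim is that old import paths keep working while warning; a sketch, assuming this file backs langchain.vectorstores.redis:

# a hedged sketch: resolves through __getattr__/DEPRECATED_LOOKUP above and
# emits a deprecation warning before returning the langchain_community class
from langchain.vectorstores.redis import Redis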
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from docarray.proto import NodeProto
class BaseNode(ABC):
"""
    A DocumentNode is an object that can be nested inside a Document.
    A Document itself is a DocumentNode, as are the prebuilt types.
"""
@abstractmethod
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert itself into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
...
|
from abc import ABC, abstractmethod
from docarray.proto import NodeProto
class BaseNode(ABC):
"""
    A DocumentNode is an object that can be nested inside a Document.
    A Document itself is a DocumentNode, as are the prebuilt types.
"""
@abstractmethod
def _to_node_protobuf(self) -> NodeProto:
"""Convert itself into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
...
|
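Any concrete node has to implement _to_node_protobuf; a minimal sketch of a subclass follows (the NodeProto handling is schematic only, since the real schema dictates which oneof field to set):

# a hedged sketch of a concrete BaseNode, extending the class defined above
from docarray.proto import NodeProto

class MyNode(BaseNode):
    def _to_node_protobuf(self) -> NodeProto:
        node = NodeProto()
        # a real implementation would set the oneof field matching this node type
        return node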
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import IS_THREAD_SAFE
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
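The device-placement behavior described in the docstring can be observed directly with the helpers re-exported above; a sketch:

# a hedged sketch: on a CUDA machine the tensor is created on GPU, and
# convert_to_numpy moves it to CPU before the NumPy conversion
from keras.src.backend.torch.core import convert_to_numpy, convert_to_tensor

x = convert_to_tensor([1.0, 2.0, 3.0])
print(x.device)             # e.g. cuda:0 when a GPU is available
print(convert_to_numpy(x))  # [1. 2. 3.] as a CPU NumPy array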
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst'
]
|
"""Google PaLM embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as palm
@deprecated.deprecated(
reason=(
"Should use `llama-index-embeddings-google-genai` instead, using Google's latest unified SDK. "
"See: https://docs.llamaindex.ai/en/stable/examples/embeddings/google_genai/"
)
)
class GooglePaLMEmbedding(BaseEmbedding):
"""Class for Google PaLM embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-gecko-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
def __init__(
self,
model_name: str = "models/embedding-gecko-001",
api_key: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
palm.configure(api_key=api_key)
self._model = palm
@classmethod
def class_name(cls) -> str:
return "PaLMEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.generate_embeddings(model=self.model_name, text=query)[
"embedding"
]
    async def _aget_query_embedding(self, query: str) -> List[float]:
        """The asynchronous version of _get_query_embedding."""
        return self._get_query_embedding(query)  # fall back to the sync implementation
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.generate_embeddings(model=self.model_name, text=text)[
"embedding"
]
    async def _aget_text_embedding(self, text: str) -> List[float]:
        """Asynchronously get text embedding."""
        return self._get_text_embedding(text)  # fall back to the sync implementation
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._model.generate_embeddings(model=self.model_name, text=texts)[
"embedding"
]
    async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Asynchronously get text embeddings."""
        return self._get_text_embeddings(texts)  # fall back to the sync implementation
|
"""Google PaLM embeddings file."""
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as palm
class GooglePaLMEmbedding(BaseEmbedding):
"""Class for Google PaLM embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-gecko-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
def __init__(
self,
model_name: str = "models/embedding-gecko-001",
api_key: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
palm.configure(api_key=api_key)
self._model = palm
@classmethod
def class_name(cls) -> str:
return "PaLMEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.generate_embeddings(model=self.model_name, text=query)[
"embedding"
]
    async def _aget_query_embedding(self, query: str) -> List[float]:
        """The asynchronous version of _get_query_embedding."""
        return self._get_query_embedding(query)  # fall back to the sync implementation
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.generate_embeddings(model=self.model_name, text=text)[
"embedding"
]
    async def _aget_text_embedding(self, text: str) -> List[float]:
        """Asynchronously get text embedding."""
        return self._get_text_embedding(text)  # fall back to the sync implementation
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._model.generate_embeddings(model=self.model_name, text=texts)[
"embedding"
]
    async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Asynchronously get text embeddings."""
        return self._get_text_embeddings(texts)  # fall back to the sync implementation
|
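Basic usage of the class above via the public BaseEmbedding interface; the import path and API key are assumptions.

# a hedged sketch: requires a valid PaLM API key
from llama_index.embeddings.google import GooglePaLMEmbedding

embed_model = GooglePaLMEmbedding(api_key="YOUR_API_KEY")
vector = embed_model.get_query_embedding("hello world")
print(len(vector))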
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import uuid
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
Simple web page reader.
Reads pages from the web.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a URL and returns a dictionary of metadata.
Default is None.
"""
is_remote: bool = True
html_to_text: bool
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
html_to_text: bool = False,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
super().__init__(html_to_text=html_to_text)
self._metadata_fn = metadata_fn
@classmethod
def class_name(cls) -> str:
return "SimpleWebPageReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the input directory.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
response = requests.get(url, headers=None).text
if self.html_to_text:
import html2text
response = html2text.html2text(response)
metadata: Dict = {"url": url}
if self._metadata_fn is not None:
metadata = self._metadata_fn(url)
if "url" not in metadata:
metadata["url"] = url
documents.append(
Document(text=response, id_=str(uuid.uuid4()), metadata=metadata)
)
return documents
|
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
Simple web page reader.
Reads pages from the web.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a URL and returns a dictionary of metadata.
Default is None.
"""
is_remote: bool = True
html_to_text: bool
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
html_to_text: bool = False,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
super().__init__(html_to_text=html_to_text)
self._metadata_fn = metadata_fn
@classmethod
def class_name(cls) -> str:
return "SimpleWebPageReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the input directory.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
response = requests.get(url, headers=None).text
if self.html_to_text:
import html2text
response = html2text.html2text(response)
metadata: Optional[Dict] = None
if self._metadata_fn is not None:
metadata = self._metadata_fn(url)
documents.append(Document(text=response, id_=url, metadata=metadata or {}))
return documents
|
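Typical usage of the reader above; the import path is an assumption for the packaged llama-index web readers.

# a hedged sketch: fetch one page and convert its HTML to plain text
from llama_index.readers.web import SimpleWebPageReader

reader = SimpleWebPageReader(html_to_text=True)
docs = reader.load_data(["https://example.com"])
print(docs[0].text[:200])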
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage writer
WRITERS = Registry('writer')
|
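Registering and building through one of the root registries above follows the usual mmengine pattern; a sketch, assuming the registries are importable from mmengine.registry:

# a hedged sketch: config-driven construction through a Registry
from mmengine.registry import HOOKS

@HOOKS.register_module()
class MyHook:
    def __init__(self, interval: int = 1):
        self.interval = interval

hook = HOOKS.build(dict(type='MyHook', interval=10))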
from typing import List
import pytest
from llama_index.core.schema import (
Document,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
return [Document(text=doc_text)]
@pytest.fixture()
def nodes() -> List[TextNode]:
"""Get documents."""
# NOTE: one document for now
return [
TextNode(
text="Hello world.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
TextNode(
text="This is a test.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
TextNode(
text="This is another test.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
TextNode(
text="This is a test v2.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
]
|
from typing import List
import pytest
from llama_index.core.schema import (
Document,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(text=doc_text)]
@pytest.fixture()
def nodes() -> List[TextNode]:
"""Get documents."""
# NOTE: one document for now
return [
TextNode(
text="Hello world.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
TextNode(
text="This is a test.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
TextNode(
text="This is another test.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
TextNode(
text="This is a test v2.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc")
},
),
]
|
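pytest injects these fixtures by argument name; a minimal consuming test might look like this (the test itself is a sketch, not part of the suite):

# a hedged sketch of a test that consumes the fixtures above
def test_fixture_shapes(documents, nodes):
    assert len(documents) == 1
    assert len(nodes) == 4
    assert all(node.relationships for node in nodes)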