input (string, lengths 33-5k) | output (string, lengths 32-5k)
---|---
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
forced_align,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import CIFAR10, CIFAR100
from .cityscapes import Cityscapes
from .clevr import CLEVRClassification
from .coco import CocoCaptions, CocoDetection
from .country211 import Country211
from .dtd import DTD
from .eurosat import EuroSAT
from .fakedata import FakeData
from .fer2013 import FER2013
from .fgvc_aircraft import FGVCAircraft
from .flickr import Flickr30k, Flickr8k
from .flowers102 import Flowers102
from .folder import DatasetFolder, ImageFolder
from .food101 import Food101
from .gtsrb import GTSRB
from .hmdb51 import HMDB51
from .imagenet import ImageNet
from .imagenette import Imagenette
from .inaturalist import INaturalist
from .kinetics import Kinetics
from .kitti import Kitti
from .lfw import LFWPairs, LFWPeople
from .lsun import LSUN, LSUNClass
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
from .moving_mnist import MovingMNIST
from .omniglot import Omniglot
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .phototour import PhotoTour
from .places365 import Places365
from .rendered_sst2 import RenderedSST2
from .sbd import SBDataset
from .sbu import SBU
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .stl10 import STL10
from .sun397 import SUN397
from .svhn import SVHN
from .ucf101 import UCF101
from .usps import USPS
from .vision import VisionDataset
from .voc import VOCDetection, VOCSegmentation
from .widerface import WIDERFace
__all__ = (
"LSUN",
"LSUNClass",
"ImageFolder",
"DatasetFolder",
"FakeData",
"CocoCaptions",
"CocoDetection",
"CIFAR10",
"CIFAR100",
"EMNIST",
"FashionMNIST",
"QMNIST",
"MNIST",
"KMNIST",
"MovingMNIST",
"StanfordCars",
"STL10",
"SUN397",
"SVHN",
"PhotoTour",
"SEMEION",
"Omniglot",
"SBU",
"Flickr8k",
"Flickr30k",
"Flowers102",
"VOCSegmentation",
"VOCDetection",
"Cityscapes",
"ImageNet",
"Caltech101",
"Caltech256",
"CelebA",
"WIDERFace",
"SBDataset",
"VisionDataset",
"USPS",
"Kinetics",
"HMDB51",
"UCF101",
"Places365",
"Kitti",
"INaturalist",
"LFWPeople",
"LFWPairs",
"KittiFlow",
"Sintel",
"FlyingChairs",
"FlyingThings3D",
"HD1K",
"Food101",
"DTD",
"FER2013",
"GTSRB",
"CLEVRClassification",
"OxfordIIITPet",
"PCAM",
"Country211",
"FGVCAircraft",
"EuroSAT",
"RenderedSST2",
"Kitti2012Stereo",
"Kitti2015Stereo",
"CarlaStereo",
"Middlebury2014Stereo",
"CREStereo",
"FallingThingsStereo",
"SceneFlowStereo",
"SintelStereo",
"InStereo2k",
"ETH3DStereo",
"wrap_dataset_for_transforms_v2",
"Imagenette",
)
# We override current module's attributes to handle the import:
# from torchvision.datasets import wrap_dataset_for_transforms_v2
# without a cyclic error.
# Ref: https://peps.python.org/pep-0562/
def __getattr__(name):
if name in ("wrap_dataset_for_transforms_v2",):
from torchvision.tv_tensors._dataset_wrapper import wrap_dataset_for_transforms_v2
return wrap_dataset_for_transforms_v2
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import CIFAR10, CIFAR100
from .cityscapes import Cityscapes
from .clevr import CLEVRClassification
from .coco import CocoCaptions, CocoDetection
from .country211 import Country211
from .dtd import DTD
from .eurosat import EuroSAT
from .fakedata import FakeData
from .fer2013 import FER2013
from .fgvc_aircraft import FGVCAircraft
from .flickr import Flickr30k, Flickr8k
from .flowers102 import Flowers102
from .folder import DatasetFolder, ImageFolder
from .food101 import Food101
from .gtsrb import GTSRB
from .hmdb51 import HMDB51
from .imagenet import ImageNet
from .imagenette import Imagenette
from .inaturalist import INaturalist
from .kinetics import Kinetics
from .kitti import Kitti
from .lfw import LFWPairs, LFWPeople
from .lsun import LSUN, LSUNClass
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
from .moving_mnist import MovingMNIST
from .omniglot import Omniglot
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .phototour import PhotoTour
from .places365 import Places365
from .rendered_sst2 import RenderedSST2
from .sbd import SBDataset
from .sbu import SBU
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .stl10 import STL10
from .sun397 import SUN397
from .svhn import SVHN
from .ucf101 import UCF101
from .usps import USPS
from .vision import VisionDataset
from .voc import VOCDetection, VOCSegmentation
from .widerface import WIDERFace
__all__ = (
"LSUN",
"LSUNClass",
"ImageFolder",
"DatasetFolder",
"FakeData",
"CocoCaptions",
"CocoDetection",
"CIFAR10",
"CIFAR100",
"EMNIST",
"FashionMNIST",
"QMNIST",
"MNIST",
"KMNIST",
"StanfordCars",
"STL10",
"SUN397",
"SVHN",
"PhotoTour",
"SEMEION",
"Omniglot",
"SBU",
"Flickr8k",
"Flickr30k",
"Flowers102",
"VOCSegmentation",
"VOCDetection",
"Cityscapes",
"ImageNet",
"Caltech101",
"Caltech256",
"CelebA",
"WIDERFace",
"SBDataset",
"VisionDataset",
"USPS",
"Kinetics",
"HMDB51",
"UCF101",
"Places365",
"Kitti",
"INaturalist",
"LFWPeople",
"LFWPairs",
"KittiFlow",
"Sintel",
"FlyingChairs",
"FlyingThings3D",
"HD1K",
"Food101",
"DTD",
"FER2013",
"GTSRB",
"CLEVRClassification",
"OxfordIIITPet",
"PCAM",
"Country211",
"FGVCAircraft",
"EuroSAT",
"RenderedSST2",
"Kitti2012Stereo",
"Kitti2015Stereo",
"CarlaStereo",
"Middlebury2014Stereo",
"CREStereo",
"FallingThingsStereo",
"SceneFlowStereo",
"SintelStereo",
"InStereo2k",
"ETH3DStereo",
"wrap_dataset_for_transforms_v2",
"Imagenette",
)
# We override current module's attributes to handle the import:
# from torchvision.datasets import wrap_dataset_for_transforms_v2
# without a cyclic error.
# Ref: https://peps.python.org/pep-0562/
def __getattr__(name):
if name in ("wrap_dataset_for_transforms_v2",):
from torchvision.tv_tensors._dataset_wrapper import wrap_dataset_for_transforms_v2
return wrap_dataset_for_transforms_v2
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
def squared_l2_norm(x):
x = backend.convert_to_numpy(x)
return np.sum(x**2)
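# Quick numeric check (illustrative values, not taken from the test data): a
# unit-normalized vector such as [0.6, 0.8] has squared L2 norm 0.36 + 0.64 = 1.0,
# which is what the correctness tests below assert per sample along the chosen axis.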
class UnitNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_un_basics(self):
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": -1},
input_shape=(2, 3),
expected_output_shape=(2, 3),
supports_masking=True,
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": (1, 2)},
input_shape=(1, 3, 3),
expected_output_shape=(1, 3, 3),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Invalid value for `axis` argument: expected an int or a "
"list/tuple of ints."
),
):
layers.UnitNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.UnitNormalization(axis=-1)
inputs = np.random.normal(size=(2, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)
layer = layers.UnitNormalization(axis=(1, 2))
inputs = np.random.normal(size=(2, 3, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)
layer = layers.UnitNormalization(axis=1)
inputs = np.random.normal(size=(2, 3, 2))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
def squared_l2_norm(x):
x = backend.convert_to_numpy(x)
return np.sum(x**2)
class UnitNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_un_basics(self):
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": -1},
input_shape=(2, 3),
expected_output_shape=(2, 3),
supports_masking=True,
)
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": (1, 2)},
input_shape=(1, 3, 3),
expected_output_shape=(1, 3, 3),
supports_masking=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Invalid value for `axis` argument: expected an int or a "
"list/tuple of ints."
),
):
layers.UnitNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.UnitNormalization(axis=-1)
inputs = np.random.normal(size=(2, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)
layer = layers.UnitNormalization(axis=(1, 2))
inputs = np.random.normal(size=(2, 3, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)
layer = layers.UnitNormalization(axis=1)
inputs = np.random.normal(size=(2, 3, 2))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as in the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as in the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
import torch
from torchvision.prototype import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
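# Illustrative behaviour (example values assumed, not from the reference implementation):
# for a video with 10 frames along `temporal_dim` and num_samples=4,
# torch.linspace(0, 9, 4).long() yields indices [0, 3, 6, 9], so frames 0, 3, 6 and 9 are kept.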
def uniform_temporal_subsample(
inpt: datapoints.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> datapoints.VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
elif isinstance(inpt, datapoints.Video):
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
import torch
from torchvision.prototype import datapoints
from torchvision.utils import _log_api_usage_once
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
def uniform_temporal_subsample(
inpt: datapoints.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> datapoints.VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Video)):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
elif isinstance(inpt, datapoints.Video):
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
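        # Contrastive loss from Hadsell et al. (2006): positive pairs (label == 1) are pulled
        # together via distances.pow(2), while negative pairs (label == 0) are pushed until they
        # are at least `margin` apart via relu(margin - distances).pow(2).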
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""
The metric for the contrastive loss
"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
:param model: SentenceTransformer model
        :param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
:param size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('all-MiniLM-L6-v2')
train_examples = [
InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.ContrastiveLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
|
import pytest
from xgboost import testing as tm
class TestPlotting:
@pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(), tm.no_graphviz()))
def test_categorical(self) -> None:
from xgboost.testing.plotting import run_categorical
run_categorical("hist", "cuda")
|
import sys
import pytest
from xgboost import testing as tm
sys.path.append("tests/python")
import test_plotting as tp
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(), tm.no_graphviz()))
class TestPlotting:
cputest = tp.TestPlotting()
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self):
self.cputest.run_categorical("gpu_hist")
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding3DTest(testing.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"}, {"data_format": "channels_last"}
)
def test_zero_padding_3d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=((1, 2), (3, 4), (0, 2)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4, 0:-2], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, 0:-2, :], inputs)
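    # Shape sanity check (illustrative, derived from the (1, 2, 3, 4, 5) input above):
    # with padding ((1, 2), (3, 4), (0, 2)) and channels_last, the spatial dims grow from
    # (2, 3, 4) to (2+1+2, 3+3+4, 4+0+2) = (5, 10, 6), giving an output of shape (1, 5, 10, 6, 5).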
@parameterized.product(
(
{"padding": ((2, 2), (2, 2), (2, 2))}, # 3 tuples
{"padding": (2, 2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_3d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, 2:-2, :], inputs)
def test_zero_padding_3d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 2, None, 4, 5))
else:
input_layer = layers.Input(batch_shape=(1, 5, 2, None, 4))
padded = layers.ZeroPadding3D(((1, 2), (3, 4), (5, 6)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(padded.shape, (1, 5, None, 15, 5))
else:
self.assertEqual(padded.shape, (1, 5, 5, None, 15))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2)},
{"padding": (1, 2, 3, 4)},
{"padding": "1"},
{"padding": ((1, 2), (3, 4), (5, 6, 7))},
{"padding": ((1, 2), (3, 4), (5, -6))},
{"padding": ((1, 2), (3, 4), "5")},
)
def test_zero_padding_3d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_3d_get_config(self, data_format):
layer = layers.ZeroPadding3D(padding=(1, 2, 3), data_format=data_format)
expected_config = {
"data_format": data_format,
"dtype": dtype_policies.serialize(layer.dtype_policy),
"name": layer.name,
"padding": ((1, 1), (2, 2), (3, 3)),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding3DTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"}, {"data_format": "channels_last"}
)
def test_zero_padding_3d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=((1, 2), (3, 4), (0, 2)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4, 0:-2], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, 0:-2, :], inputs)
@parameterized.product(
(
{"padding": ((2, 2), (2, 2), (2, 2))}, # 3 tuples
{"padding": (2, 2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_3d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, 2:-2, :], inputs)
def test_zero_padding_3d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 2, None, 4, 5))
else:
input_layer = layers.Input(batch_shape=(1, 5, 2, None, 4))
padded = layers.ZeroPadding3D(((1, 2), (3, 4), (5, 6)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(padded.shape, (1, 5, None, 15, 5))
else:
self.assertEqual(padded.shape, (1, 5, 5, None, 15))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2)},
{"padding": (1, 2, 3, 4)},
{"padding": "1"},
{"padding": ((1, 2), (3, 4), (5, 6, 7))},
{"padding": ((1, 2), (3, 4), (5, -6))},
{"padding": ((1, 2), (3, 4), "5")},
)
def test_zero_padding_3d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_3d_get_config(self, data_format):
layer = layers.ZeroPadding3D(padding=(1, 2, 3), data_format=data_format)
expected_config = {
"data_format": data_format,
"dtype": dtype_policies.serialize(layer.dtype_policy),
"name": layer.name,
"padding": ((1, 1), (2, 2), (3, 3)),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
from typing import Dict
def get_default_metas() -> Dict:
"""
Get a copy of default meta variables.
NOTE: DO NOT ADD MORE ENTRIES HERE!
:return: a deep copy of the default metas in a new dict
"""
# NOTE: DO NOT ADD MORE ENTRIES HERE!
return {
'name': '', #: a string, the name of the executor
        'description': '', #: a string, the description of this executor. It will be used in the automatic docs UI
'workspace': '', #: a string, the workspace of the executor
'py_modules': '', #: a list of strings, the python dependencies of the executor
}
def get_executor_taboo():
"""
Returns a set of executor meta variables
:return: set of executor meta variables
"""
taboo = {
'self',
'args',
'kwargs',
'metas',
'requests',
'runtime_args',
'dynamic_batching',
}
_defaults = get_default_metas()
taboo.update(_defaults.keys())
return taboo
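# Illustrative result (derived from the two helpers above): the taboo set contains
# {'self', 'args', 'kwargs', 'metas', 'requests', 'runtime_args', 'dynamic_batching'}
# plus the default meta keys {'name', 'description', 'workspace', 'py_modules'}.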
|
from typing import Dict
def get_default_metas() -> Dict:
"""
Get a copy of default meta variables.
NOTE: DO NOT ADD MORE ENTRIES HERE!
:return: a deep copy of the default metas in a new dict
"""
# NOTE: DO NOT ADD MORE ENTRIES HERE!
return {
'name': '', #: a string, the name of the executor
        'description': '', #: a string, the description of this executor. It will be used in the automatic docs UI
'workspace': '', #: a string, the workspace of the executor
'py_modules': '', #: a list of strings, the python dependencies of the executor
}
def get_executor_taboo():
"""
Returns a set of executor meta variables
:return: set of executor meta variables
"""
taboo = {'self', 'args', 'kwargs', 'metas', 'requests', 'runtime_args', 'dynamic_batching'}
_defaults = get_default_metas()
taboo.update(_defaults.keys())
return taboo
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: int = 10_000
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
if (
self.config.columns is not None
and self.config.features is not None
and set(self.config.columns) != set(self.config.features)
):
raise ValueError(
"The columns and features argument must contain the same columns, but got ",
f"{self.config.columns} and {self.config.features}",
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
self.info.features = datasets.Features(
{col: feat for col, feat in self.info.features.items() if col in self.config.columns}
)
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
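# Minimal usage sketch (the `load_dataset` entry point and the file name "data.parquet"
# are assumptions for illustration, not part of this module):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files="data.parquet", columns=["text", "label"])
# The `columns` keyword ends up on ParquetConfig and restricts both the inferred features
# and the column projection passed to `parquet_file.iter_batches` above.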
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: int = 10_000
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
features = datasets.Features.from_arrow_schema(pq.read_schema(f))
if self.config.columns is not None:
features = datasets.Features(
{col: feat for col, feat in features.items() if col in self.config.columns}
)
self.info.features = features
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
|
from collections import namedtuple
from typing import TYPE_CHECKING, Dict, NamedTuple, Optional
from urllib.parse import urlparse
if TYPE_CHECKING:
from docarray import DocumentArray
_ParsedHost = namedtuple('ParsedHost', 'on host port version scheme')
def _parse_host(host: str) -> NamedTuple:
"""Parse a host string into namedtuple object.
A parsed host's components are `on`, `host`, `port`, `version`, `scheme`.
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
"""
r = urlparse(host)
on = r.path or '/'
host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))._replace(path='').geturl()
)
port = r.port or None
version = None
scheme = r.scheme
splited_path = list(filter(None, r.path.split('/')))
if len(splited_path) == 2:
# path includes version and endpoint
version = splited_path[0]
host = host + '/' + version
on = '/' + splited_path[1]
return _ParsedHost(on=on, host=host, port=port, version=version, scheme=scheme)
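# Illustrative parse (example host assumed for this sketch):
#   _parse_host('grpc://192.168.0.123:8080/endpoint')
#   -> ParsedHost(on='/endpoint', host='grpc://192.168.0.123', port=8080, version=None, scheme='grpc')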
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
        :param show_progress: whether to show a progress bar
        :param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
parsed_host = _parse_host(host)
batch_size = batch_size or len(self)
scheme = parsed_host.scheme
host = parsed_host.host
if scheme in ('grpcs', 'https', 'wss'):
scheme = scheme[:-1]
if scheme == 'ws':
scheme = 'websocket' # temp fix for the core
if scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=host, **kwargs)
with f:
return f.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if parsed_host.port:
host += f':{parsed_host.port}'
c = Client(host=host)
return c.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
else:
raise ValueError(f'unsupported scheme: `{scheme}`')
|
from collections import namedtuple
from typing import TYPE_CHECKING, Dict, NamedTuple, Optional
from urllib.parse import urlparse
if TYPE_CHECKING:
from docarray import DocumentArray
_ParsedHost = namedtuple('ParsedHost', 'on host port version scheme')
def _parse_host(host: str) -> NamedTuple:
"""Parse a host string into namedtuple object.
A parsed host's components are `on`, `host`, `port`, `version`, `scheme`.
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
"""
r = urlparse(host)
on = r.path or '/'
host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))._replace(path='').geturl()
)
port = r.port or None
version = None
scheme = r.scheme
splited_path = list(filter(None, r.path.split('/')))
if len(splited_path) == 2:
# path includes version and endpoint
version = splited_path[0]
host = host + '/' + version
on = '/' + splited_path[1]
return _ParsedHost(on=on, host=host, port=port, version=version, scheme=scheme)
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
        :param show_progress: whether to show a progress bar
        :param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
parsed_host = _parse_host(host)
batch_size = batch_size or len(self)
scheme = parsed_host.scheme
host = parsed_host.host
if scheme in ('grpcs', 'https', 'wss'):
scheme = scheme[:-1]
if scheme == 'ws':
scheme = 'websocket' # temp fix for the core
if scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=host, **kwargs)
with f:
return f.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if parsed_host.port:
host += f':{parsed_host.port}'
c = Client(host=host)
return c.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
else:
raise ValueError(f'unsupported scheme: {scheme}')
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .musdb_hq import MUSDB_HQ
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"LibriLightLimited",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
"MUSDB_HQ",
"FluentSpeechCommands",
]
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"LibriLightLimited",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
"FluentSpeechCommands",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
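# Illustrative results (derived from the parsing rules above):
#   parse_version_info('0.8.0')    -> (0, 8, 0)
#   parse_version_info('0.8.0rc1') -> (0, 8, 0, 'rc1')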
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.7.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS, but it seems to have no effect; the export must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
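# Illustrative: launching with the environment variable JINA_MP_START_METHOD=spawn would make
# the block above call set_start_method('spawn') instead of relying on the platform default.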
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.15.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.17'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS, but it seems to have no effect; the export must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.14.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.17'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with many plot generators in parallel, versus the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
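# --- Illustrative sketch (added for clarity, not part of the original module) ---
# A minimal use of the "first class citizens" re-exported above. The Executor name
# `MyExecutor` and the document text are hypothetical.
if __name__ == '__main__':
    class MyExecutor(Executor):
        @requests
        def add_text(self, docs: DocumentArray, **kwargs):
            # annotate every incoming Document
            for doc in docs:
                doc.text = 'processed'
    with Flow().add(uses=MyExecutor) as f:
        # send two empty Documents through the Flow and print the results
        for doc in f.post('/', DocumentArray([Document(), Document()])):
            print(doc.text)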
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
        prefix (str, optional): The prefix that will be added to the metric
            names to disambiguate homonymous metrics of different evaluators.
            If the prefix is not provided in the argument, self.default_prefix
            will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
"""Optional[dict]: Meta info of the dataset."""
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
"""Set the dataset meta info to the metric."""
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[dict],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
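# --- Illustrative sketch (added for clarity, not part of the original file) ---
# A minimal concrete metric built on the abstract base class above. The class name
# `SimpleAccuracy` and the result keys 'pred_label' / 'gt_label' are hypothetical.
class SimpleAccuracy(BaseMetric):
    default_prefix = 'acc'
    def process(self, data_batch: Sequence[dict],
                predictions: Sequence[dict]) -> None:
        # store one (correct, total) pair per prediction
        for pred in predictions:
            self.results.append((int(pred['pred_label'] == pred['gt_label']), 1))
    def compute_metrics(self, results: list) -> dict:
        # aggregate the per-sample pairs collected by `process`
        correct = sum(c for c, _ in results)
        total = sum(t for _, t in results)
        return {'accuracy': correct / max(total, 1)}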
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
        prefix (str, optional): The prefix that will be added to the metric
            names to disambiguate homonymous metrics of different evaluators.
            If the prefix is not provided in the argument, self.default_prefix
            will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[dict],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
if not hasattr(llm, "bind_tools"):
msg = "This function requires a bind_tools() method be implemented on the LLM."
raise ValueError(
msg,
)
llm_with_tools = llm.bind_tools(tools)
return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
|
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
if not hasattr(llm, "bind_tools"):
msg = "This function requires a bind_tools() method be implemented on the LLM."
raise ValueError(
msg,
)
llm_with_tools = llm.bind_tools(tools)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
return agent
|
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(self, id: str, **kwargs: Any) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
return super().__init__("", id=id, **kwargs)
RemoveMessage.model_rebuild()
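# --- Illustrative sketch (added for clarity, not part of the original module) ---
# A RemoveMessage carries only the id of the message to delete; the id value below
# is hypothetical.
if __name__ == "__main__":
    msg = RemoveMessage(id="run-42-message-3")
    print(msg.type, msg.id)  # -> remove run-42-message-3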
|
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(self, id: str, **kwargs: Any) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
return super().__init__("", id=id, **kwargs)
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "messages"].
"""
return ["langchain", "schema", "messages"]
RemoveMessage.model_rebuild()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from mmengine.utils.dl_utils import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Defaults to 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
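# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The hook above is registered in HOOKS and is typically enabled through a runner
# config entry such as the following; the interval value is hypothetical.
# custom_hooks = [
#     dict(type='NaiveVisualizationHook', interval=10, draw_gt=True, draw_pred=True)
# ]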
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Defaults to 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
import numpy as np
import pytest
from docarray import DocumentArray
def test_embedding_ops_error():
da = DocumentArray.empty(100)
db = DocumentArray.empty(100)
da.embeddings = np.random.random([100, 256])
da[2].embedding = None
da[3].embedding = None
with pytest.raises(ValueError, match='[2, 3]'):
da.embeddings
db.embeddings = np.random.random([100, 256])
with pytest.raises(ValueError, match='[2, 3]'):
da.match(db)
with pytest.raises(ValueError, match='[2, 3]'):
db.match(da)
with pytest.raises(ValueError, match='[2, 3]'):
db.find(da)
with pytest.raises(ValueError, match='[2, 3]'):
da.find(db)
da.embeddings = None
with pytest.raises(ValueError, match='Did you forget to set'):
da.find(db)
db.embeddings = None
with pytest.raises(ValueError, match='Did you forget to set'):
da.find(db)
with pytest.raises(ValueError, match='Did you forget to set'):
db.find(da)
da.embeddings = np.random.random([100, 256])
with pytest.raises(ValueError, match='filter must be dict when query is None'):
da.find(None)
|
import numpy as np
import pytest
from docarray import DocumentArray
def test_embedding_ops_error():
da = DocumentArray.empty(100)
db = DocumentArray.empty(100)
da.embeddings = np.random.random([100, 256])
da[2].embedding = None
da[3].embedding = None
with pytest.raises(ValueError, match='[2, 3]'):
da.embeddings
db.embeddings = np.random.random([100, 256])
with pytest.raises(ValueError, match='[2, 3]'):
da.match(db)
with pytest.raises(ValueError, match='[2, 3]'):
db.match(da)
with pytest.raises(ValueError, match='[2, 3]'):
db.find(da)
with pytest.raises(ValueError, match='[2, 3]'):
da.find(db)
da.embeddings = None
with pytest.raises(ValueError, match='Did you forget to set'):
da.find(db)
db.embeddings = None
with pytest.raises(ValueError, match='Did you forget to set'):
da.find(db)
with pytest.raises(ValueError, match='Did you forget to set'):
db.find(da)
da.embeddings = np.random.random([100, 256])
with pytest.raises(ValueError, match='Did you forget to set'):
da.find(None)
|
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers import AutoProcessor, LlamaTokenizerFast, LlavaNextVideoProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import LlavaNextImageProcessor, LlavaNextVideoImageProcessor
if is_torch_available():
pass
@require_vision
class LlavaNextVideoProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaNextVideoProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaNextImageProcessor()
video_processor = LlavaNextVideoImageProcessor()
tokenizer = LlamaTokenizerFast.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>", "<video>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaNextVideoProcessor(
video_processor=video_processor, image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs
)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
cls.video_token = processor.video_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_video_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor
@classmethod
def prepare_processor_dict(cls):
return {
"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"num_additional_image_tokens": 0,
"patch_size": 128,
"vision_feature_select_strategy": "default",
}
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
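# --- Illustrative sketch (added for clarity, not part of the original test) ---
# How the chat template prepared above is typically rendered at inference time;
# the message content is hypothetical.
# processor = LlavaNextVideoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
# messages = [{"role": "user", "content": [{"type": "video"}, {"type": "text", "text": "Describe the clip"}]}]
# prompt = processor.apply_chat_template(messages, add_generation_prompt=True)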
|
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers import AutoProcessor, LlamaTokenizerFast, LlavaNextVideoProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import LlavaNextImageProcessor, LlavaNextVideoImageProcessor
if is_torch_available():
pass
@require_vision
class LlavaNextVideoProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaNextVideoProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaNextImageProcessor()
video_processor = LlavaNextVideoImageProcessor()
tokenizer = LlamaTokenizerFast.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaNextVideoProcessor(
video_processor=video_processor, image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs
)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
cls.video_token = processor.video_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_video_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor
@classmethod
def prepare_processor_dict(cls):
return {
"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"num_additional_image_tokens": 0,
"patch_size": 128,
"vision_feature_select_strategy": "default",
}
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given source and target sentences (here the same sentences), the SparseMSEEvaluator computes the MSE between the teacher and student embeddings.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.034905
Model Sparsity: Active Dimensions: 54.6, Sparsity Ratio: 0.9982
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0349
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given source and target sentences (here the same sentences), the SparseMSEEvaluator computes the MSE between the teacher and student embeddings.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity: Active Dimensions: 55.6, Sparsity Ratio: 0.9982
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.fixture(scope='session')
def basic_encoder() -> SpacyTextEncoder:
return SpacyTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'SpacyTextEncoder'
def test_encoding_cpu():
enc = SpacyTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = SpacyTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name, emb_dim',
[
('en_core_web_sm', 96),
('en_core_web_lg', 300),
('es_core_news_sm', 96),
],
)
def test_models(model_name: str, emb_dim: int):
encoder = SpacyTextEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (emb_dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: SpacyTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: SpacyTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_spacy_text_encoder_gpu():
# Input
docs = DocumentArray(
[
Document(text='Jina rocks'),
]
)
# Encoder embedding
encoder = SpacyTextEncoder(device='cuda')
encoder.encode(docs, parameters={})
assert len(docs) == 1
assert docs[0].embedding.shape == (96,)
def test_quality_embeddings(basic_encoder: SpacyTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.fixture(scope='session')
def basic_encoder() -> SpacyTextEncoder:
return SpacyTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'SpacyTextEncoder'
def test_encoding_cpu():
enc = SpacyTextEncoder(require_gpu=False)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = SpacyTextEncoder(require_gpu=True)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name, emb_dim',
[
('en_core_web_sm', 96),
('en_core_web_lg', 300),
('es_core_news_sm', 96),
],
)
def test_models(model_name: str, emb_dim: int):
encoder = SpacyTextEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (emb_dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: SpacyTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: SpacyTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: SpacyTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
import copy
from typing import Any, Dict, List, Tuple
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str, str]:
    """Split a specific key into a key, name pair
    ex: 'my_executor__key' will be split into 'key', 'my_executor'
    :param key_name: key name of the param
    :return: return the split 'key', 'executor_name' for the key_name
"""
key_split = key_name.split(_SPECIFIC_EXECUTOR_SEPARATOR)
new_key_name = key_split.pop(-1)
executor_name = ''.join(key_split)
return new_key_name, executor_name
def _get_name_from_replicas_name(name: str) -> str:
    """Return the original name without the replica suffix
    ex: 'exec1/rep-0' will be transformed into 'exec1'
    :param name: name of the DataRequest
    :return: the original name without the replica suffix
"""
return name.split('/')[0]
def _is_param_for_specific_executor(key_name: str) -> bool:
"""Tell if a key is for a specific Executor
ex: 'key' is for every Executor whereas 'my_executor__key' is only for 'my_executor'
:param key_name: key name of the param
:return: return True if key_name is for specific Executor, False otherwise
"""
if _SPECIFIC_EXECUTOR_SEPARATOR in key_name:
if key_name.startswith(_SPECIFIC_EXECUTOR_SEPARATOR) or key_name.endswith(
_SPECIFIC_EXECUTOR_SEPARATOR
):
return False
return True
else:
return False
def _parse_specific_params(parameters: Dict, executor_name: str):
"""Parse the parameters dictionary to filter executor specific parameters
    :param parameters: dictionary containing the parameters
:param executor_name: name of the Executor
:returns: the parsed parameters after applying filtering for the specific Executor
"""
parsed_params = copy.deepcopy(parameters)
for key in parameters:
if _is_param_for_specific_executor(key):
(
key_name,
key_executor_name,
) = _spit_key_and_executor_name(key)
if key_executor_name == executor_name:
parsed_params[key_name] = parameters[key]
del parsed_params[key]
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
_DEFAULT_GRPC_OPTION = {
'grpc.max_send_message_length': -1,
'grpc.max_receive_message_length': -1,
# for the following see this blog post for the choice of default value https://cs.mcgill.ca/~mxia3/2019/02/23/Using-gRPC-in-Production/
'grpc.keepalive_time_ms': 10000,
# send keepalive ping every 10 second, default is 2 hours.
'grpc.keepalive_timeout_ms': 5000,
# keepalive ping time out after 5 seconds, default is 20 seconds
'grpc.keepalive_permit_without_calls': True,
# allow keepalive pings when there's no gRPC calls
'grpc.http2.max_pings_without_data': 0,
# allow unlimited amount of keepalive pings without data
'grpc.http2.min_time_between_pings_ms': 10000,
# allow grpc pings from client every 10 seconds
'grpc.http2.min_ping_interval_without_data_ms': 5000,
# allow grpc pings from client without data every 5 seconds
}
def _get_grpc_server_options(option_from_args: Dict) -> List[Tuple[str, Any]]:
"""transform dict of args into grpc option, will merge the args wit the default args
:param option_from_args: a dict of argument
:return: grpc option i.e a list of tuple of key value
"""
option_from_args = (
{**_DEFAULT_GRPC_OPTION, **option_from_args}
if option_from_args
else _DEFAULT_GRPC_OPTION
) # merge new and default args
return list(option_from_args.items())
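# --- Illustrative sketch (added for clarity, not part of the original module) ---
# How executor-specific parameters are filtered; the executor names 'exec1' and
# 'exec2' are hypothetical.
if __name__ == '__main__':
    params = {'top_k': 3, 'exec1__top_k': 5, 'exec2__top_k': 7}
    print(_parse_specific_params(params, 'exec1'))
    # -> {'top_k': 5}: the generic value is overridden for 'exec1' and the key
    #    addressed to 'exec2' is dropped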
|
import copy
from typing import Dict, Tuple
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str, str]:
    """Split a specific key into a key, name pair
    ex: 'my_executor__key' will be split into 'key', 'my_executor'
    :param key_name: key name of the param
    :return: return the split 'key', 'executor_name' for the key_name
"""
key_split = key_name.split(_SPECIFIC_EXECUTOR_SEPARATOR)
new_key_name = key_split.pop(-1)
executor_name = ''.join(key_split)
return new_key_name, executor_name
def _get_name_from_replicas_name(name: str) -> str:
    """Return the original name without the replica suffix
    ex: 'exec1/rep-0' will be transformed into 'exec1'
    :param name: name of the DataRequest
    :return: the original name without the replica suffix
"""
return name.split('/')[0]
def _is_param_for_specific_executor(key_name: str) -> bool:
"""Tell if a key is for a specific Executor
ex: 'key' is for every Executor whereas 'my_executor__key' is only for 'my_executor'
:param key_name: key name of the param
:return: return True if key_name is for specific Executor, False otherwise
"""
if _SPECIFIC_EXECUTOR_SEPARATOR in key_name:
if key_name.startswith(_SPECIFIC_EXECUTOR_SEPARATOR) or key_name.endswith(
_SPECIFIC_EXECUTOR_SEPARATOR
):
return False
return True
else:
return False
def _parse_specific_params(parameters: Dict, executor_name: str):
"""Parse the parameters dictionary to filter executor specific parameters
    :param parameters: dictionary containing the parameters
:param executor_name: name of the Executor
:returns: the parsed parameters after applying filtering for the specific Executor
"""
parsed_params = copy.deepcopy(parameters)
for key in parameters:
if _is_param_for_specific_executor(key):
(
key_name,
key_executor_name,
) = _spit_key_and_executor_name(key)
if key_executor_name == executor_name:
parsed_params[key_name] = parameters[key]
del parsed_params[key]
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.util import is_training_available
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
@pytest.mark.skipif(
not is_training_available(), reason='Sentence Transformers was not installed with the `["train"]` extra.'
)
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
import itertools
from parameterized import parameterized
from torchaudio.backend import sox_io_backend
from torchaudio_unittest.common_utils import (
get_wav_data,
PytorchTestCase,
skipIfNoExec,
skipIfNoSox,
TempDirMixin,
)
from .common import get_enc_params, name_func
@skipIfNoExec("sox")
@skipIfNoSox
class TestRoundTripIO(TempDirMixin, PytorchTestCase):
"""save/load round trip should not degrade data for lossless formats"""
@parameterized.expand(
list(
itertools.product(
["float32", "int32", "int16", "uint8"],
[8000, 16000],
[1, 2],
)
),
name_func=name_func,
)
def test_wav(self, dtype, sample_rate, num_channels):
"""save/load round trip should not degrade data for wav formats"""
original = get_wav_data(dtype, num_channels, normalize=False)
enc, bps = get_enc_params(dtype)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.wav")
sox_io_backend.save(path, data, sample_rate, encoding=enc, bits_per_sample=bps)
data, sr = sox_io_backend.load(path, normalize=False)
assert sr == sample_rate
self.assertEqual(original, data)
@parameterized.expand(
list(
itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)
),
name_func=name_func,
)
def test_flac(self, sample_rate, num_channels, compression_level):
"""save/load round trip should not degrade data for flac formats"""
original = get_wav_data("float32", num_channels)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.flac")
sox_io_backend.save(path, data, sample_rate, compression=compression_level)
data, sr = sox_io_backend.load(path)
assert sr == sample_rate
self.assertEqual(original, data)
|
import itertools
from parameterized import parameterized
from torchaudio.backend import sox_io_backend
from torchaudio_unittest.common_utils import (
TempDirMixin,
PytorchTestCase,
skipIfNoExec,
skipIfNoSox,
get_wav_data,
)
from .common import (
name_func,
get_enc_params,
)
@skipIfNoExec("sox")
@skipIfNoSox
class TestRoundTripIO(TempDirMixin, PytorchTestCase):
"""save/load round trip should not degrade data for lossless formats"""
@parameterized.expand(
list(
itertools.product(
["float32", "int32", "int16", "uint8"],
[8000, 16000],
[1, 2],
)
),
name_func=name_func,
)
def test_wav(self, dtype, sample_rate, num_channels):
"""save/load round trip should not degrade data for wav formats"""
original = get_wav_data(dtype, num_channels, normalize=False)
enc, bps = get_enc_params(dtype)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.wav")
sox_io_backend.save(path, data, sample_rate, encoding=enc, bits_per_sample=bps)
data, sr = sox_io_backend.load(path, normalize=False)
assert sr == sample_rate
self.assertEqual(original, data)
@parameterized.expand(
list(
itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)
),
name_func=name_func,
)
def test_flac(self, sample_rate, num_channels, compression_level):
"""save/load round trip should not degrade data for flac formats"""
original = get_wav_data("float32", num_channels)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.flac")
sox_io_backend.save(path, data, sample_rate, compression=compression_level)
data, sr = sox_io_backend.load(path)
assert sr == sample_rate
self.assertEqual(original, data)
|
_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import TripletEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTripletEvaluator(TripletEvaluator):
def __init__(
self,
anchors: list[str],
positives: list[str],
negatives: list[str],
main_similarity_function: str | SimilarityFunction | None = None,
margin: float | dict[str, float] | None = None,
name: str = "",
batch_size: int = 16,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
main_distance_function: str | SimilarityFunction | None = "deprecated",
):
return super().__init__(
anchors=anchors,
positives=positives,
negatives=negatives,
main_similarity_function=main_similarity_function,
margin=margin,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
main_distance_function=main_distance_function,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
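# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Evaluating a SparseEncoder on anchor/positive/negative triplets. The example
# sentences are hypothetical; the model name is one used elsewhere in this collection.
if __name__ == "__main__":
    from sentence_transformers import SparseEncoder
    model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
    evaluator = SparseTripletEvaluator(
        anchors=["A cat sits on the mat"],
        positives=["A kitten is resting on a rug"],
        negatives=["The stock market fell sharply today"],
        name="triplet-dev",
    )
    print(evaluator(model))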
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import TripletEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTripletEvaluator(TripletEvaluator):
def __init__(
self,
anchors: list[str],
positives: list[str],
negatives: list[str],
main_similarity_function: str | SimilarityFunction | None = None,
margin: float | dict[str, float] | None = None,
name: str = "",
batch_size: int = 16,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
main_distance_function: str | SimilarityFunction | None = "deprecated",
):
return super().__init__(
anchors=anchors,
positives=positives,
negatives=negatives,
main_similarity_function=main_similarity_function,
margin=margin,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
main_distance_function=main_distance_function,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
    Feed-forward layer with an activation function.
    This layer takes a fixed-size sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
        activation_function: PyTorch activation function applied to the output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
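# --- Usage sketch (illustrative only; not part of the original module) ---
# Dense operates on the "sentence_embedding" entry of a features dict, so a minimal
# standalone call looks like the following. The dimensions are arbitrary.
if __name__ == "__main__":
    dense = Dense(in_features=768, out_features=256, activation_function=nn.Tanh())
    features = {"sentence_embedding": torch.randn(4, 768)}
    out = dense(features)
    print(out["sentence_embedding"].shape)  # torch.Size([4, 256])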
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
    Feed-forward layer with an activation function.
    This layer takes a fixed-size sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
        activation_function: PyTorch activation function applied to the output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
import numpy as np
from docarray import BaseDocument, DocumentArray, Image, Text
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, Image)
assert isinstance(doc.text, Text)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDocument):
text: str
images: DocumentArray[Image]
doc = ChunksDocument(
text='hello',
images=DocumentArray[Image]([Image() for _ in range(10)]),
)
assert isinstance(doc.images, DocumentArray)
|
import numpy as np
from docarray import Document, DocumentArray, Image, Text
def test_multi_modal_doc():
class MyMultiModalDoc(Document):
image: Image
text: Text
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
assert isinstance(doc.image, Document)
assert isinstance(doc.image, Image)
assert isinstance(doc.text, Text)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(Document):
text: str
images: DocumentArray[Image]
doc = ChunksDocument(
text='hello',
images=DocumentArray[Image]([Image() for _ in range(10)]),
)
assert isinstance(doc.images, DocumentArray)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean', 'DyReLU'
]
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> CLIPModel:
return CLIPModel(model_name=input_path)
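# --- Usage sketch (illustrative only; not part of the original module) ---
# tokenize() accepts a mixed list of PIL images and strings and records their order in
# "image_text_info", which forward() uses to interleave the projected image and text
# embeddings back into a single batch. Running this downloads the default checkpoint.
if __name__ == "__main__":
    clip = CLIPModel()
    features = clip.tokenize([Image.new("RGB", (224, 224)), "a photo of a cat"])
    features = clip(features)
    # One row per input, in the original order; 512 dims for the base patch32 model.
    print(features["sentence_embedding"].shape)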
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> CLIPModel:
return CLIPModel(model_name=input_path)
|
"""Test Tongyi API wrapper."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.tongyi import Tongyi
def test_tongyi_call() -> None:
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm.invoke("who are you")
assert isinstance(output, str)
def test_tongyi_generate() -> None:
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm.generate(["who are you"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_generate_stream() -> None:
"""Test valid call to tongyi."""
llm = Tongyi(streaming=True)
output = llm.generate(["who are you"])
print(output) # noqa: T201
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_with_param_alias() -> None:
"""Test tongyi parameters alias"""
llm = Tongyi(model="qwen-max", api_key="your-api_key")
assert llm.model_name == "qwen-max"
assert llm.dashscope_api_key == "your-api_key"
|
"""Test Tongyi API wrapper."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.tongyi import Tongyi
def test_tongyi_call() -> None:
"""Test valid call to tongyi."""
llm = Tongyi() # type: ignore[call-arg]
output = llm.invoke("who are you")
assert isinstance(output, str)
def test_tongyi_generate() -> None:
"""Test valid call to tongyi."""
llm = Tongyi() # type: ignore[call-arg]
output = llm.generate(["who are you"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_generate_stream() -> None:
"""Test valid call to tongyi."""
llm = Tongyi(streaming=True) # type: ignore[call-arg]
output = llm.generate(["who are you"])
print(output) # noqa: T201
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_with_param_alias() -> None:
"""Test tongyi parameters alias"""
llm = Tongyi(model="qwen-max", api_key="your-api_key") # type: ignore[call-arg]
assert llm.model_name == "qwen-max"
assert llm.dashscope_api_key == "your-api_key"
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromFile,
LoadImageFromWebcam, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'FilterAnnotations',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform',
'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform',
'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine',
'YOLOXHSVRandomAug', 'CopyPaste'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste'
]
|
import logging
import random
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseInformationRetrievalEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the Touche-2020 IR dataset (https://huggingface.co/datasets/BeIR/webis-touche2020, https://huggingface.co/datasets/BeIR/webis-touche2020-qrels)
corpus = load_dataset("BeIR/webis-touche2020", "corpus", split="corpus")
queries = load_dataset("BeIR/webis-touche2020", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/webis-touche2020-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 30,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=30_000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-touche2020-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
|
import random
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseInformationRetrievalEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the Touche-2020 IR dataset (https://huggingface.co/datasets/BeIR/webis-touche2020, https://huggingface.co/datasets/BeIR/webis-touche2020-qrels)
corpus = load_dataset("BeIR/webis-touche2020", "corpus", split="corpus")
queries = load_dataset("BeIR/webis-touche2020", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/webis-touche2020-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 30,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=30_000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-touche2020-subset-test",
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
print("Starting evaluation ")
results = ir_evaluator(model)
print(f"Primary metric: {ir_evaluator.primary_metric}")
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# Print every metric reported for this dataset
for key, value in results.items():
    if key.startswith(ir_evaluator.name):
        print(f"{key}: {value:.4f}")
|
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class SequentialRetriever(BaseRetriever):
"""Test util that returns a sequence of documents"""
sequential_responses: list[list[Document]]
response_index: int = 0
def _get_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
if self.response_index >= len(self.sequential_responses):
return []
self.response_index += 1
return self.sequential_responses[self.response_index - 1]
async def _aget_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
return self._get_relevant_documents(query)
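# --- Usage sketch (illustrative only; not part of the original test util) ---
# Each call returns the next canned response; once the sequence is exhausted it returns
# an empty list. invoke() routes to _get_relevant_documents via BaseRetriever.
if __name__ == "__main__":
    retriever = SequentialRetriever(
        sequential_responses=[
            [Document(page_content="first")],
            [Document(page_content="second")],
        ]
    )
    print(retriever.invoke("query"))  # documents with page_content "first"
    print(retriever.invoke("query"))  # documents with page_content "second"
    print(retriever.invoke("query"))  # []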
|
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class SequentialRetriever(BaseRetriever):
"""Test util that returns a sequence of documents"""
sequential_responses: list[list[Document]]
response_index: int = 0
def _get_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
if self.response_index >= len(self.sequential_responses):
return []
else:
self.response_index += 1
return self.sequential_responses[self.response_index - 1]
async def _aget_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
return self._get_relevant_documents(query)
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True)))
|
_base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True)))
|
import pytest
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, AudioBlock
from llama_index.core.memory.memory import Memory
from llama_index.core.storage.chat_store.sql import MessageStatus
@pytest.fixture()
def memory():
"""Create a basic memory instance for testing."""
return Memory(
token_limit=1000,
token_flush_size=700,
chat_history_token_ratio=0.9,
session_id="test_user",
)
@pytest.mark.asyncio
async def test_initialization(memory):
"""Test that memory initializes correctly."""
assert memory.token_limit == 1000
assert memory.token_flush_size == 700
assert memory.session_id == "test_user"
@pytest.mark.asyncio
async def test_estimate_token_count_text(memory):
"""Test token counting for text."""
message = ChatMessage(role="user", content="Test message")
count = memory._estimate_token_count(message)
assert count == len(memory.tokenizer_fn("Test message"))
@pytest.mark.asyncio
async def test_estimate_token_count_image(memory):
"""Test token counting for images."""
block = ImageBlock(url="http://example.com/image.jpg")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.image_token_size_estimate
@pytest.mark.asyncio
async def test_estimate_token_count_audio(memory):
"""Test token counting for audio."""
block = AudioBlock(url="http://example.com/audio.mp3")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.audio_token_size_estimate
@pytest.mark.asyncio
async def test_manage_queue_under_limit(memory):
"""Test queue management when under token limit."""
# Set up a case where we're under the token limit
chat_messages = [
ChatMessage(role="user", content="Short message")
]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert cur_messages[0].content == "Short message"
@pytest.mark.asyncio
async def test_manage_queue_over_limit(memory):
"""Test queue management when over token limit."""
# Set up a case where we're over the token limit
chat_messages = [
ChatMessage(role="user", content="x " * 500),
ChatMessage(role="assistant", content="y " * 500),
ChatMessage(role="user", content="z " * 500),
]
# This will exceed the token limit and flush 700 tokens (two messages)
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert "z " in cur_messages[0].content
@pytest.mark.asyncio
async def test_aput(memory):
"""Test adding a message."""
message = ChatMessage(role="user", content="New message")
await memory.aput(message)
# Should add the message to the store
messages = await memory.aget()
assert len(messages) == 1
assert messages[0].content == "New message"
@pytest.mark.asyncio
async def test_aput_messages(memory):
"""Test adding multiple messages."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aput_messages(messages)
# Should add the messages to the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aset(memory):
"""Test setting the chat history."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aset(messages)
# Should set the messages in the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aget_all(memory):
"""Test getting all messages."""
await memory.aput_messages([
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
])
messages = await memory.aget_all(status=MessageStatus.ACTIVE)
# Should get all messages from the store
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_areset(memory):
"""Test resetting the memory."""
await memory.aput(ChatMessage(role="user", content="New message"))
await memory.areset(status=MessageStatus.ACTIVE)
# Should delete messages from the store
messages = await memory.aget()
assert len(messages) == 0
|
import pytest
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, AudioBlock
from llama_index.core.memory.memory import Memory
from llama_index.core.storage.chat_store.sql import MessageStatus
@pytest.fixture()
def memory():
"""Create a basic memory instance for testing."""
return Memory(
token_limit=1000,
token_flush_size=700,
user_id="test_user",
)
@pytest.mark.asyncio
async def test_initialization(memory):
"""Test that memory initializes correctly."""
assert memory.token_limit == 1000
assert memory.token_flush_size == 700
assert memory.user_id == "test_user"
@pytest.mark.asyncio
async def test_estimate_token_count_text(memory):
"""Test token counting for text."""
message = ChatMessage(role="user", content="Test message")
count = memory._estimate_token_count(message)
assert count == len(memory.tokenizer_fn("Test message"))
@pytest.mark.asyncio
async def test_estimate_token_count_image(memory):
"""Test token counting for images."""
block = ImageBlock(url="http://example.com/image.jpg")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.image_token_size_estimate
@pytest.mark.asyncio
async def test_estimate_token_count_audio(memory):
"""Test token counting for audio."""
block = AudioBlock(url="http://example.com/audio.mp3")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.audio_token_size_estimate
@pytest.mark.asyncio
async def test_manage_queue_under_limit(memory):
"""Test queue management when under token limit."""
# Set up a case where we're under the token limit
chat_messages = [
ChatMessage(role="user", content="Short message")
]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert cur_messages[0].content == "Short message"
@pytest.mark.asyncio
async def test_manage_queue_over_limit(memory):
"""Test queue management when over token limit."""
# Set up a case where we're over the token limit
chat_messages = [
ChatMessage(role="user", content="x " * 500),
ChatMessage(role="assistant", content="y " * 500),
ChatMessage(role="user", content="z " * 500),
]
# This will exceed the token limit and flush 700 tokens (two messages)
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert "z " in cur_messages[0].content
@pytest.mark.asyncio
async def test_aput(memory):
"""Test adding a message."""
message = ChatMessage(role="user", content="New message")
await memory.aput(message)
# Should add the message to the store
messages = await memory.aget()
assert len(messages) == 1
assert messages[0].content == "New message"
@pytest.mark.asyncio
async def test_aput_messages(memory):
"""Test adding multiple messages."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aput_messages(messages)
# Should add the messages to the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aset(memory):
"""Test setting the chat history."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aset(messages)
# Should set the messages in the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aget_all(memory):
"""Test getting all messages."""
await memory.aput_messages([
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
])
messages = await memory.aget_all(status=MessageStatus.ACTIVE)
# Should get all messages from the store
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_areset(memory):
"""Test resetting the memory."""
await memory.aput(ChatMessage(role="user", content="New message"))
await memory.areset(status=MessageStatus.ACTIVE)
# Should delete messages from the store
messages = await memory.aget()
assert len(messages) == 0
|
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XNLI benchmark metric."""
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them lower-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
}
),
codebase_urls=[],
reference_urls=[],
format="numpy",
)
def _compute(self, predictions, references):
return {"accuracy": simple_accuracy(predictions, references)}
|
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XNLI benchmark metric. """
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them lower-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
}
),
codebase_urls=[],
reference_urls=[],
format="numpy",
)
def _compute(self, predictions, references):
return {"accuracy": simple_accuracy(predictions, references)}
|
import os
from torchaudio.datasets import librilight_limited
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.flac"
path = os.path.join(file_path, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
save_wav(path, data, sample_rate)
sample = (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id)
return sample
def get_mock_dataset(dataset_dir: str):
"""Create mocked dataset for a sub directory.
Args:
dataset_dir (str): the path of the sub directory.
The structure is: audio_type/speaker_id/chapter_id/filename.flac
"""
mocked_data = []
sample_rate = 16000 # 16kHz
seed = 0
for audio_type in ["clean", "other"]:
for speaker_id in range(5):
for chapter_id in range(3):
file_path = os.path.join(dataset_dir, audio_type, str(speaker_id), str(chapter_id))
os.makedirs(file_path, exist_ok=True)
trans_content = []
for utterance_id in range(3):
sample = _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed)
trans_content.append(f"{sample[3]}-{sample[4]}-{sample[5]:04d} {sample[2]}")
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(file_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
def get_mock_datasets(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_data_10min, mocked_data_1h, mocked_data_10h = [], [], []
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", "0")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10min = get_mock_dataset(dataset_dir)
mocked_data_1h += mocked_data_10min
for i in range(1, 6):
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", str(i))
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_1h += get_mock_dataset(dataset_dir)
mocked_data_10h += mocked_data_1h
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "9h")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10h += get_mock_dataset(dataset_dir)
return mocked_data_10min, mocked_data_1h, mocked_data_10h
class TestLibriLightLimited(TempDirMixin, TorchaudioTestCase):
root_dir = None
samples_10min = []
samples_1h = []
samples_10h = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
(cls.samples_10min, cls.samples_1h, cls.samples_10h) = get_mock_datasets(cls.root_dir)
def _test_librilightlimited(self, dataset, samples):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == samples[i][1]
assert transcript == samples[i][2]
assert speaker_id == samples[i][3]
assert chapter_id == samples[i][4]
assert utterance_id == samples[i][5]
num_samples += 1
assert num_samples == len(samples)
def test_librilightlimited_10min(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10min")
self._test_librilightlimited(dataset, self.samples_10min)
def test_librilightlimited_1h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="1h")
self._test_librilightlimited(dataset, self.samples_1h)
def test_librilightlimited_10h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10h")
self._test_librilightlimited(dataset, self.samples_10h)
|
import os
from torchaudio.datasets import librilight_limited
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.flac"
path = os.path.join(file_path, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
save_wav(path, data, sample_rate)
sample = (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id)
return sample
def get_mock_dataset(dataset_dir: str):
"""Create mocked dataset for a sub directory.
Args:
dataset_dir (str): the path of the sub directory.
The structure is: audio_type/speaker_id/chapter_id/filename.flac
"""
mocked_data = []
sample_rate = 16000 # 16kHz
seed = 0
for audio_type in ["clean", "other"]:
for speaker_id in range(5):
for chapter_id in range(3):
file_path = os.path.join(dataset_dir, audio_type, str(speaker_id), str(chapter_id))
os.makedirs(file_path, exist_ok=True)
trans_content = []
for utterance_id in range(3):
sample = _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed)
trans_content.append(f"{sample[3]}-{sample[4]}-{sample[5]:04d} {sample[2]}")
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(file_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
def get_mock_datasets(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_data_10min, mocked_data_1h, mocked_data_10h = [], [], []
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", "0")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10min = get_mock_dataset(dataset_dir)
mocked_data_1h += mocked_data_10min
for i in range(1, 6):
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", str(i))
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_1h += get_mock_dataset(dataset_dir)
mocked_data_10h += mocked_data_1h
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "9h")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10h += get_mock_dataset(dataset_dir)
return mocked_data_10min, mocked_data_1h, mocked_data_10h
class TestLibriLightLimited(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples_10min = []
samples_1h = []
samples_10h = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
(cls.samples_10min, cls.samples_1h, cls.samples_10h) = get_mock_datasets(cls.root_dir)
def _test_librilightlimited(self, dataset, samples):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == samples[i][1]
assert transcript == samples[i][2]
assert speaker_id == samples[i][3]
assert chapter_id == samples[i][4]
assert utterance_id == samples[i][5]
num_samples += 1
assert num_samples == len(samples)
def test_librilightlimited_10min(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10min")
self._test_librilightlimited(dataset, self.samples_10min)
def test_librilightlimited_1h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="1h")
self._test_librilightlimited(dataset, self.samples_1h)
def test_librilightlimited_10h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10h")
self._test_librilightlimited(dataset, self.samples_10h)
|
_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=50,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
|
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=50,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
# test with format_only=True, keep_results=False
with self.assertRaises(AssertionError):
CityScapesMetric(
outfile_prefix=self.tmp_dir.name + 'test',
format_only=True,
keep_results=False)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
keep_results=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
CLASSES=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
        self.assertTrue(not osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
keep_results=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
CLASSES=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
# test with format_only=True, keep_results=False
with self.assertRaises(AssertionError):
CityScapesMetric(
outfile_prefix=self.tmp_dir.name + 'test',
format_only=True,
keep_results=False)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
predictions = [{
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
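        # Cityscapes gtFine instance IDs encode class_id * 1000 + instance index;
        # label id 24 corresponds to the 'person' class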
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_batch = [{
'inputs': None,
'data_sample': {
'img_path': img_path1
}
}, {
'inputs': None,
'data_sample': {
'img_path': img_path2
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
keep_results=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
CLASSES=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process(data_batch, predictions)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
        self.assertTrue(not osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
keep_results=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
CLASSES=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process(data_batch, predictions)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
#!/usr/bin/env python3
"""Extract version number from __init__.py"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import os
sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py")
data = open(sklearn_init).readlines()
version_line = next(line for line in data if line.startswith("__version__"))
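# e.g. '__version__ = "1.5.0"' -> '1.5.0'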
version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")
print(version)
|
#!/usr/bin/env python3
"""Extract version number from __init__.py"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import os
sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py")
data = open(sklearn_init).readlines()
version_line = next(line for line in data if line.startswith("__version__"))
version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")
print(version)
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# MMEngine supports the following two ways; users can choose
# whichever is more convenient
# param_scheduler = [
# dict(
# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa
# dict(
# type='MultiStepLR',
# begin=0,
# end=12,
# by_epoch=True,
# milestones=[16, 23],
# gamma=0.1)
# ]
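# Equivalent shortcut: override the milestones of the MultiStepLR scheduler
# inherited from the _base_ config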
_base_.param_scheduler[1].milestones = [16, 23]
train_cfg = dict(max_epochs=24)
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
async def test_request_streamer(prefetch, num_requests, async_iterator):
requests_handled = []
results_handled = []
def request_handler_fn(request):
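        # record the request, then return a future; the streamer awaits it
        # before handing the resolved request to result_handle_fn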
requests_handled.append(request)
async def task():
await asyncio.sleep(0.5)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
        # with a sync generator, iteration over the inputs finishes before
        # all results have been handled
assert len(requests_handled) == num_requests
assert len(results_handled) <= num_requests
def _yield_data_request():
req = DataRequest()
req.header.request_id = random_identity()
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
args=args,
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(it)
num_responses = 0
async for r in response:
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
|
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
async def test_request_streamer(prefetch, num_requests, async_iterator):
requests_handled = []
results_handled = []
def request_handler_fn(request):
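        # record the request, then return a future; the streamer awaits it
        # before handing the resolved request to result_handle_fn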
requests_handled.append(request)
async def task():
await asyncio.sleep(0.5)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
        # with a sync generator, iteration over the inputs finishes before
        # all results have been handled
assert len(requests_handled) == num_requests
assert len(results_handled) < num_requests
def _yield_data_request():
req = DataRequest()
req.header.request_id = random_identity()
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
args=args,
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(it)
num_responses = 0
async for r in response:
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
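        # the visualizer expects plain tensors, so unwrap BaseBoxes wrappers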
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.datasets.builder import build_dataset
from mmdet.models.utils import mask2ndarray
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = build_dataset(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
import logging
from llama_index.llms.text_generation_inference.base import (
TextGenerationInference,
)
logger = logging.getLogger(__name__)
logger.warning("""
===============================================================================
⚠️ DEPRECATION WARNING ⚠️
===============================================================================
The llama-index-llms-text-generation-inference package is NO LONGER MAINTAINED!
Please use HuggingFaceInferenceAPI instead, which can call either TGI servers
or huggingface inference API.
To install: pip install llama-index-llms-huggingface-api
===============================================================================
""".strip())
__all__ = ["TextGenerationInference"]
|
from llama_index.llms.text_generation_inference.base import (
TextGenerationInference,
)
__all__ = ["TextGenerationInference"]
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# training schedule: the VOC dataset is repeated 3 times in
# `_base_/datasets/voc0712.py`, so the actual number of epochs = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
            - Needs to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseCoSENTLoss(model), document_regularizer_weight=5e-5, use_document_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
            - Needs to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseCoSENTLoss(model), corpus_regularizer_weight=5e-5, use_corpus_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640) # height, width
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
    # To align with the official implementation, the score threshold is 0.01
    # in the val phase and 0.001 in the test phase.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
# According to the official implementation, multi-scale
# training is not considered here but in the
# 'mmdet/models/detectors/yolox.py'.
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
# If the image is three-channel, the pad value needs
# to be set separately for each channel.
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
persistent_workers=True,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
max_epochs = 300
num_last_epochs = 15
resume_from = None
interval = 10
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=num_last_epochs,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=max_epochs)
custom_hooks = [
dict(
type='YOLOXModeSwitchHook',
num_last_epochs=num_last_epochs,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=num_last_epochs,
interval=interval,
priority=48),
dict(
type='ExpMomentumEMAHook',
resume_from=resume_from,
momentum=0.0001,
priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(
save_best='auto',
# The evaluation interval is 'interval' when running epoch is
# less than ‘max_epochs - num_last_epochs’.
# The evaluation interval is 1 when running epoch is greater than
# or equal to ‘max_epochs - num_last_epochs’.
interval=interval,
dynamic_intervals=[(max_epochs - num_last_epochs, 1)],
metric='bbox')
log_config = dict(interval=50)
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640)
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
    # To align with the official implementation, the score threshold is 0.01
    # in the val phase and 0.001 in the test phase.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
# According to the official implementation, multi-scale
# training is not considered here but in the
# 'mmdet/models/detectors/yolox.py'.
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
# If the image is three-channel, the pad value needs
# to be set separately for each channel.
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
persistent_workers=True,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
max_epochs = 300
num_last_epochs = 15
resume_from = None
interval = 10
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=num_last_epochs,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=max_epochs)
custom_hooks = [
dict(
type='YOLOXModeSwitchHook',
num_last_epochs=num_last_epochs,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=num_last_epochs,
interval=interval,
priority=48),
dict(
type='ExpMomentumEMAHook',
resume_from=resume_from,
momentum=0.0001,
priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(
save_best='auto',
# The evaluation interval is 'interval' when running epoch is
# less than ‘max_epochs - num_last_epochs’.
# The evaluation interval is 1 when running epoch is greater than
# or equal to ‘max_epochs - num_last_epochs’.
interval=interval,
dynamic_intervals=[(max_epochs - num_last_epochs, 1)],
metric='bbox')
log_config = dict(interval=50)
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "fer2013"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"))
@register_dataset(NAME)
class FER2013(Dataset):
"""FER 2013 Dataset
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def _resources(self) -> List[OnlineResource]:
archive = KaggleDownloadResource(
"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
file_name=f"{self._split}.csv.zip",
sha256=self._CHECKSUMS[self._split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
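        # "pixels" is a string of 48*48 space-separated grayscale values;
        # "emotion" may be missing (e.g. in the test split), hence the None label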
label_id = data.get("emotion")
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self._categories) if label_id is not None else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 28_709 if self._split == "train" else 3_589
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "fer2013"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"))
@register_dataset(NAME)
class FER2013(Dataset):
"""FER 2013 Dataset
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def _resources(self) -> List[OnlineResource]:
archive = KaggleDownloadResource(
"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
file_name=f"{self._split}.csv.zip",
sha256=self._CHECKSUMS[self._split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
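        # "pixels" is a string of 48*48 space-separated grayscale values;
        # "emotion" may be missing (e.g. in the test split), hence the None label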
label_id = data.get("emotion")
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self._categories) if label_id is not None else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 28_709 if self._split == "train" else 3_589
|
from typing import TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(self: T, samples: int, multiple_geometries: bool = False) -> NdArray:
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
                point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
return parse_obj_as(NdArray, point_cloud)
|
from typing import TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import NdArray
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(self: T, samples: int, multiple_geometries: bool = False) -> NdArray:
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
                point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
return parse_obj_as(NdArray, point_cloud)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms_marco_dev_small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms_marco_dev_small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.61
MRR@10: 54.30
NDCG@10: 65.20
Model Query Sparsity: Active Dimensions: 43.9, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 128.4, Sparsity Ratio: 0.9958
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms_marco_dev_small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6520
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms-marco-dev-small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.46
MRR@10: 54.18
NDCG@10: 65.10
Model Sparsity Stats Query : Row Non-Zero Mean: 43.89658737182617, Row Sparsity Mean: 0.9985617995262146
Model Sparsity Stats Corpus : Row Non-Zero Mean: 128.37216186523438, Row Sparsity Mean: 0.9957940578460693
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms-marco-dev-small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6510
|
"""Test conversation chain and memory."""
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain.chains.conversational_retrieval.base import (
ConversationalRetrievalChain,
)
from langchain.memory.buffer import ConversationBufferMemory
from tests.unit_tests.retrievers.sequential_retriever import SequentialRetriever
async def test_simplea() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(sequential_responses=[[]])
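    # an empty retrieval result should make the chain fall back to
    # `response_if_no_docs_found`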
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1,
output_key="answer",
memory_key="chat_history",
return_messages=True,
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = await qa_chain.acall("What is the answer?")
assert got["chat_history"][1].content == fixed_resp
assert got["answer"] == fixed_resp
async def test_fixed_message_response_when_docs_founda() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(
sequential_responses=[[Document(page_content=answer)]],
)
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1,
output_key="answer",
memory_key="chat_history",
return_messages=True,
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = await qa_chain.acall("What is the answer?")
assert got["chat_history"][1].content == answer
assert got["answer"] == answer
def test_fixed_message_response_when_no_docs_found() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(sequential_responses=[[]])
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1,
output_key="answer",
memory_key="chat_history",
return_messages=True,
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = qa_chain("What is the answer?")
assert got["chat_history"][1].content == fixed_resp
assert got["answer"] == fixed_resp
def test_fixed_message_response_when_docs_found() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(
sequential_responses=[[Document(page_content=answer)]],
)
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1,
output_key="answer",
memory_key="chat_history",
return_messages=True,
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = qa_chain("What is the answer?")
assert got["chat_history"][1].content == answer
assert got["answer"] == answer
|
"""Test conversation chain and memory."""
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain.chains.conversational_retrieval.base import (
ConversationalRetrievalChain,
)
from langchain.memory.buffer import ConversationBufferMemory
from tests.unit_tests.retrievers.sequential_retriever import SequentialRetriever
async def test_simplea() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(sequential_responses=[[]])
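    # an empty retrieval result should make the chain fall back to
    # `response_if_no_docs_found`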
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = await qa_chain.acall("What is the answer?")
assert got["chat_history"][1].content == fixed_resp
assert got["answer"] == fixed_resp
async def test_fixed_message_response_when_docs_founda() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(
sequential_responses=[[Document(page_content=answer)]]
)
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = await qa_chain.acall("What is the answer?")
assert got["chat_history"][1].content == answer
assert got["answer"] == answer
def test_fixed_message_response_when_no_docs_found() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(sequential_responses=[[]])
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = qa_chain("What is the answer?")
assert got["chat_history"][1].content == fixed_resp
assert got["answer"] == fixed_resp
def test_fixed_message_response_when_docs_found() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(
sequential_responses=[[Document(page_content=answer)]]
)
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = qa_chain("What is the answer?")
assert got["chat_history"][1].content == answer
assert got["answer"] == answer
|
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Sequence, Union
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
LLMMetadata,
)
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.llms.llm import ToolSelection
from llama_index.core.program.function_program import FunctionTool, get_function_tool
from llama_index.core.tools.types import BaseTool
from pydantic import BaseModel, Field
class MockFunctionCallingLLM(FunctionCallingLLM):
def __init__(self, tool_selection: List[ToolSelection]):
super().__init__()
self._tool_selection = tool_selection
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, ChatResponse]:
return ChatResponse(message=ChatMessage(role="user", content=""))
def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, CompletionResponse]:
pass
def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[ChatResponse, None]]:
pass
def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[CompletionResponse, None]]:
pass
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
return ChatResponse(message=ChatMessage(role="user", content=""))
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
pass
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
pass
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> ChatResponseGen:
pass
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
def _prepare_chat_with_tools(
self,
tools: Sequence["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
tool_required: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
return {"messages": []}
def get_tool_calls_from_response(
self,
response: ChatResponse,
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
return self._tool_selection
class Person(BaseModel):
name: str = Field(description="Person name")
@pytest.fixture()
def person_tool() -> FunctionTool:
return get_function_tool(Person)
@pytest.fixture()
def person_tool_selection(person_tool: FunctionTool) -> ToolSelection:
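    # tool_kwargs is empty, so invoking the Person tool fails validation
    # (missing required `name`) and yields an error ToolOutput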
return ToolSelection(
tool_id="",
tool_name=person_tool.metadata.name,
tool_kwargs={},
)
def test_predict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = llm.predict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
def test_predict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
llm.predict_and_call(tools=[person_tool], error_on_tool_error=True)
@pytest.mark.asyncio
async def test_apredict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = await llm.apredict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
@pytest.mark.asyncio
async def test_apredict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
await llm.apredict_and_call(tools=[person_tool], error_on_tool_error=True)
|
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Sequence, Union
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
LLMMetadata,
)
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.llms.llm import ToolSelection
from llama_index.core.program.function_program import FunctionTool, get_function_tool
from llama_index.core.tools.types import BaseTool
from pydantic import BaseModel, Field
class MockFunctionCallingLLM(FunctionCallingLLM):
def __init__(self, tool_selection: List[ToolSelection]):
super().__init__()
self._tool_selection = tool_selection
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, ChatResponse]:
return ChatResponse(message=ChatMessage(role="user", content=""))
def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, CompletionResponse]:
pass
def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[ChatResponse, None]]:
pass
def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[CompletionResponse, None]]:
pass
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
return ChatResponse(message=ChatMessage(role="user", content=""))
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
pass
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
pass
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> ChatResponseGen:
pass
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
def _prepare_chat_with_tools(
self,
tools: Sequence["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
return {"messages": []}
def get_tool_calls_from_response(
self,
response: ChatResponse,
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
return self._tool_selection
class Person(BaseModel):
name: str = Field(description="Person name")
@pytest.fixture()
def person_tool() -> FunctionTool:
return get_function_tool(Person)
@pytest.fixture()
def person_tool_selection(person_tool: FunctionTool) -> ToolSelection:
return ToolSelection(
tool_id="",
tool_name=person_tool.metadata.name,
tool_kwargs={},
)
def test_predict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = llm.predict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
def test_predict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
llm.predict_and_call(tools=[person_tool], error_on_tool_error=True)
@pytest.mark.asyncio
async def test_apredict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = await llm.apredict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
@pytest.mark.asyncio
async def test_apredict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
await llm.apredict_and_call(tools=[person_tool], error_on_tool_error=True)
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
        help='the UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_ppr_parser(parser):
"""Mixing in arguments required by pod/deployment/runtime module into the given parser.
:param parser: the parser instance to which we add arguments
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title='Base Deployment')
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
parser.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
        help='the UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_ppr_parser(parser):
"""Mixing in arguments required by pod/deployment/runtime module into the given parser.
:param parser: the parser instance to which we add arguments
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title='Base Deployment')
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
parser.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
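# Illustrative usage sketch -- an assumption for clarity, not part of the original
# module, and it assumes `add_arg_group` accepts a plain `argparse.ArgumentParser`:
# mix the shared Pod/Deployment arguments into a parser and read back a couple of
# the flags documented above (e.g. `--name` and `--polling`).
if __name__ == '__main__':
    _demo_parser = argparse.ArgumentParser(description='demo: base Pod/Deployment arguments')
    mixin_base_ppr_parser(_demo_parser)
    _demo_args = _demo_parser.parse_args(['--name', 'my-deployment', '--polling', 'ANY'])
    print(_demo_args.name, _demo_args.polling)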
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomContrastTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomContrast,
init_kwargs={
"factor": 0.75,
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
self.run_layer_test(
layers.RandomContrast,
init_kwargs={
"factor": 0.75,
"seed": 1,
"data_format": "channels_first",
},
input_shape=(8, 3, 4, 4),
supports_masking=False,
expected_output_shape=(8, 3, 4, 4),
)
def test_random_contrast(self):
seed = 9809
np.random.seed(seed)
inputs = np.random.random((12, 8, 16, 3))
layer = layers.RandomContrast(factor=0.5, seed=seed)
outputs = layer(inputs)
# Actual contrast arithmetic
np.random.seed(seed)
factor = np.random.uniform(0.5, 1.5)
inp_mean = np.mean(inputs, axis=-3, keepdims=True)
inp_mean = np.mean(inp_mean, axis=-2, keepdims=True)
actual_outputs = (inputs - inp_mean) * factor + inp_mean
outputs = backend.convert_to_numpy(outputs)
actual_outputs = np.clip(outputs, 0, 255)
self.assertAllClose(outputs, actual_outputs)
def test_tf_data_compatibility(self):
layer = layers.RandomContrast(factor=0.5, seed=1337)
input_data = np.random.random((2, 8, 8, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
next(iter(ds)).numpy()
def test_dict_input(self):
layer = layers.RandomContrast(factor=0.1, bounding_box_format="xyxy")
data = {
"images": np.random.random((2, 4, 5, 3)),
"labels": np.random.random((2, 7)),
"segmentation_masks": np.random.random((2, 4, 5, 7)),
"bounding_boxes": {
"boxes": np.array([[1, 2, 2, 3]]),
"labels": np.array([0]),
},
}
transformed_data = layer(data)
self.assertEqual(
data["images"].shape[:-1],
transformed_data["segmentation_masks"].shape[:-1],
)
self.assertAllClose(data["labels"], transformed_data["labels"])
self.assertAllClose(
data["bounding_boxes"]["boxes"],
transformed_data["bounding_boxes"]["boxes"],
)
self.assertAllClose(
data["bounding_boxes"]["labels"],
transformed_data["bounding_boxes"]["labels"],
)
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomContrastTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomContrast,
init_kwargs={
"factor": 0.75,
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
self.run_layer_test(
layers.RandomContrast,
init_kwargs={
"factor": 0.75,
"seed": 1,
"data_format": "channels_first",
},
input_shape=(8, 3, 4, 4),
supports_masking=False,
expected_output_shape=(8, 3, 4, 4),
)
def test_random_contrast(self):
seed = 9809
np.random.seed(seed)
inputs = np.random.random((12, 8, 16, 3))
layer = layers.RandomContrast(factor=0.5, seed=seed)
outputs = layer(inputs)
# Actual contrast arithmetic
np.random.seed(seed)
factor = np.random.uniform(0.5, 1.5)
inp_mean = np.mean(inputs, axis=-3, keepdims=True)
inp_mean = np.mean(inp_mean, axis=-2, keepdims=True)
actual_outputs = (inputs - inp_mean) * factor + inp_mean
outputs = backend.convert_to_numpy(outputs)
actual_outputs = np.clip(outputs, 0, 255)
self.assertAllClose(outputs, actual_outputs)
def test_tf_data_compatibility(self):
layer = layers.RandomContrast(factor=0.5, seed=1337)
input_data = np.random.random((2, 8, 8, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
def test_dict_input(self):
layer = layers.RandomContrast(factor=0.1, bounding_box_format="xyxy")
data = {
"images": np.random.random((2, 4, 5, 3)),
"labels": np.random.random((2, 7)),
"segmentation_masks": np.random.random((2, 4, 5, 7)),
"bounding_boxes": {
"boxes": np.array([[1, 2, 2, 3]]),
"labels": np.array([0]),
},
}
transformed_data = layer(data)
self.assertEqual(
data["images"].shape[:-1],
transformed_data["segmentation_masks"].shape[:-1],
)
self.assertAllClose(data["labels"], transformed_data["labels"])
self.assertAllClose(
data["bounding_boxes"]["boxes"],
transformed_data["bounding_boxes"]["boxes"],
)
self.assertAllClose(
data["bounding_boxes"]["labels"],
transformed_data["bounding_boxes"]["labels"],
)
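# Illustrative sketch -- an assumption for clarity, not part of the original test
# file: the contrast arithmetic checked above pushes pixels away from (factor > 1)
# or pulls them toward (factor < 1) the per-image mean.
if __name__ == "__main__":
    img = np.array([[0.2, 0.4], [0.6, 0.8]])
    mean = img.mean()  # 0.5
    print((img - mean) * 1.5 + mean)  # [[0.05 0.35] [0.65 0.95]] -> higher contrast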
|
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined at each test that fixture
# ruff: noqa
import numpy as np
import pytest
import torch
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from docarray.typing import TorchTensor
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_find_torch(weaviate_client):
class TorchDoc(BaseDoc):
tens: TorchTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TorchDoc]()
index_docs = [
TorchDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TorchTensor)
assert docs[0].id == index_docs[-1].id
assert torch.allclose(docs[0].tens, index_docs[-1].tens)
@pytest.mark.tensorflow
def test_find_tensorflow():
from docarray.typing import TensorFlowTensor
class TfDoc(BaseDoc):
tens: TensorFlowTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TfDoc]()
index_docs = [
TfDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TensorFlowTensor)
assert docs[0].id == index_docs[-1].id
assert np.allclose(
docs[0].tens.unwrap().numpy(), index_docs[-1].tens.unwrap().numpy()
)
def test_comprehensive():
import numpy as np
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from docarray.typing import NdArray
class Document(BaseDoc):
text: str
embedding: NdArray[2] = Field(
dims=2, is_embedding=True
) # Embedding column -> vector representation of the document
file: NdArray[100] = Field(dims=100)
docs = [
Document(
text="Hello world",
embedding=np.array([1, 2]),
file=np.random.rand(100),
id="1",
),
Document(
text="Hello world, how are you?",
embedding=np.array([3, 4]),
file=np.random.rand(100),
id="2",
),
Document(
text="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut",
embedding=np.array([5, 6]),
file=np.random.rand(100),
id="3",
),
]
batch_config = {
"batch_size": 20,
"dynamic": False,
"timeout_retries": 3,
"num_workers": 1,
}
dbconfig = WeaviateDocumentIndex.DBConfig(
host="https://docarray-test-4mfexsso.weaviate.network", # Replace with your endpoint
auth_api_key="JPsfPHB3OLHrgnN80JAa7bmPApOxOfaHy0SO",
)
runtimeconfig = WeaviateDocumentIndex.RuntimeConfig(batch_config=batch_config)
store = WeaviateDocumentIndex[Document](db_config=dbconfig)
store.configure(runtimeconfig) # Batch settings being passed on
store.index(docs)
|
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined at each test that fixture
# ruff: noqa
import numpy as np
import pytest
import torch
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from docarray.typing import TorchTensor
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_find_torch(weaviate_client):
class TorchDoc(BaseDoc):
tens: TorchTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TorchDoc]()
index_docs = [
TorchDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TorchTensor)
assert docs[0].id == index_docs[-1].id
assert torch.allclose(docs[0].tens, index_docs[-1].tens)
@pytest.mark.tensorflow
def test_find_tensorflow():
from docarray.typing import TensorFlowTensor
class TfDoc(BaseDoc):
tens: TensorFlowTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TfDoc]()
index_docs = [
TfDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TensorFlowTensor)
assert docs[0].id == index_docs[-1].id
assert np.allclose(
docs[0].tens.unwrap().numpy(), index_docs[-1].tens.unwrap().numpy()
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, objects365v1_classes,
objects365v2_classes, oid_challenge_classes,
oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes'
]
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`stride=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`stride=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
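# Illustrative shape check -- an assumption for clarity, not part of the original
# module: for a 3x3 input with pool_size=(2, 2), strides=(1, 1), padding="valid",
# the docstring formula gives floor((3 - 2) / 1) + 1 = 2, i.e. a 2x2 pooled map.
if __name__ == "__main__":
    import numpy as np

    x = np.arange(9, dtype="float32").reshape(1, 3, 3, 1)
    avg_pool = AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding="valid")
    print(avg_pool(x).shape)  # expected: (1, 2, 2, 1)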
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import get_git_hash
from mmengine.utils.dl_utils import collect_env as collect_base_env
import mmdet
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdet
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS, though it seems to have no effect; the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
    # temporary fix for python 3.8 on macos where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.21.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS, though it seems to have no effect; the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
    # temporary fix for python 3.8 on macos where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.20.4'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`Feature` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`Feature` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': ['le chat', 'la chatte,']
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if set(translation_dict) == {"language", "translation"}:
return translation_dict
elif self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': ['le chat', 'la chatte,']
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if set(translation_dict) == {"language", "translation"}:
return translation_dict
elif self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
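# Illustrative sketch -- an assumption for clarity, not part of the original
# module: `TranslationVariableLanguages.encode_example` splits multi-valued
# languages into aligned, language-sorted `language` / `translation` lists.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
    print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
    # expected: {'language': ('en', 'fr', 'fr'),
    #            'translation': ('the cat', 'la chatte', 'le chat')}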
|
from abc import ABC
import numpy as np
import pytest
from docarray import Document, DocumentArray
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.memory import SequenceLikeMixin
from docarray.array.storage.redis.getsetdel import GetSetDelMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
@pytest.fixture(scope='function')
def columns():
columns = {
'col_str': 'str',
'col_bytes': 'bytes',
'col_int': 'int',
'col_float': 'float',
'col_long': 'long',
'col_double': 'double',
}
return columns
@pytest.fixture(scope='function')
def da_redis(columns):
cfg = RedisConfig(n_dim=3, columns=columns)
da_redis = DocumentArrayDummy(storage='redis', config=cfg)
return da_redis
@pytest.mark.parametrize(
'embedding', [[1, 2, 3], [1.0, 2.0, 3.0], [1, 2, 3, 4, 5], None]
)
@pytest.mark.parametrize('text', ['test_text', None])
@pytest.mark.parametrize(
'tag',
[
{'tag_1': 'tag1'},
{'tag_1': 'tag1', 'tag_2': 'tag2'},
{'tag_1': 'tag1', 'tag_2': 'tag2', 'tag_3': 'tag3'},
None,
],
)
@pytest.mark.parametrize(
'col',
[
{'col_str': 'hello', 'col_bytes': b'world'},
{'col_int': 1, 'col_float': 1.0},
{'col_long': 123, 'col_double': 1.1},
None,
],
)
def test_document_to_embedding(
embedding, text, tag, col, da_redis, columns, start_storage
):
tags = {}
if tag is not None:
tags.update(tag)
if col is not None:
tags.update(col)
doc = Document(embedding=embedding, text=text, tags=tags)
payload = da_redis._document_to_redis(doc)
if embedding is None:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.zeros((3))
)
else:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.array(embedding)
)
if text is None:
with pytest.raises(KeyError):
payload['text']
else:
assert payload['text'] == text
for col, _ in columns.items():
if col in tags:
assert payload[col] == tags[col]
else:
with pytest.raises(KeyError):
payload[col]
for key in tags:
if key not in (col for col in columns.keys()):
assert key not in payload
@pytest.mark.parametrize(
'doc',
[
Document(id='0'),
Document(id='1', text='hello world'),
Document(id='2', embedding=[1, 2, 3], tags={'tag_1': 'tag1', 'tag_2': 'tag2'}),
Document(
text='hello world',
embedding=[1, 2, 3],
tags={'tag_1': 'tag1', 'tag_2': 'tag2'},
chunks=[Document(text='token1'), Document(text='token2')],
),
],
)
def test_setgetdel_doc_by_id(doc, da_redis, start_storage):
da_redis._set_doc_by_id(doc.id, doc)
doc_get = da_redis._get_doc_by_id(doc.id)
assert doc == doc_get
da_redis._del_doc_by_id(doc.id)
with pytest.raises(KeyError):
da_redis._get_doc_by_id(doc.id)
def test_offset2ids_and_clear_storage(da_redis, start_storage):
ids = [str(i) for i in range(3)]
for id in ids:
doc = Document(id=id)
da_redis._set_doc_by_id(id, doc)
da_redis._offset2ids = Offset2ID(ids)
da_redis._save_offset2ids()
da_redis._load_offset2ids()
assert da_redis._offset2ids.ids == ids
da_redis._clear_storage()
for i in range(3):
with pytest.raises(KeyError):
da_redis._get_doc_by_id(i)
|
from abc import ABC
import numpy as np
import pytest
from docarray import Document, DocumentArray
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.memory import SequenceLikeMixin
from docarray.array.storage.redis.getsetdel import GetSetDelMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
@pytest.fixture(scope='function')
def columns():
columns = {
'col_str': 'str',
'col_bytes': 'bytes',
'col_int': 'int',
'col_float': 'float',
'col_long': 'long',
'col_double': 'double',
}
return columns
@pytest.fixture(scope='function')
def da_redis(columns):
cfg = RedisConfig(n_dim=3, flush=True, columns=columns)
da_redis = DocumentArrayDummy(storage='redis', config=cfg)
return da_redis
@pytest.mark.parametrize(
'embedding', [[1, 2, 3], [1.0, 2.0, 3.0], [1, 2, 3, 4, 5], None]
)
@pytest.mark.parametrize('text', ['test_text', None])
@pytest.mark.parametrize(
'tag',
[
{'tag_1': 'tag1'},
{'tag_1': 'tag1', 'tag_2': 'tag2'},
{'tag_1': 'tag1', 'tag_2': 'tag2', 'tag_3': 'tag3'},
None,
],
)
@pytest.mark.parametrize(
'col',
[
{'col_str': 'hello', 'col_bytes': b'world'},
{'col_int': 1, 'col_float': 1.0},
{'col_long': 123, 'col_double': 1.1},
None,
],
)
def test_document_to_embedding(
embedding, text, tag, col, da_redis, columns, start_storage
):
tags = {}
if tag is not None:
tags.update(tag)
if col is not None:
tags.update(col)
doc = Document(embedding=embedding, text=text, tags=tags)
payload = da_redis._document_to_redis(doc)
if embedding is None:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.zeros((3))
)
else:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.array(embedding)
)
if text is None:
with pytest.raises(KeyError):
payload['text']
else:
assert payload['text'] == text
for col, _ in columns.items():
if col in tags:
assert payload[col] == tags[col]
else:
with pytest.raises(KeyError):
payload[col]
for key in tags:
if key not in (col for col in columns.keys()):
assert key not in payload
@pytest.mark.parametrize(
'doc',
[
Document(id='0'),
Document(id='1', text='hello world'),
Document(id='2', embedding=[1, 2, 3], tags={'tag_1': 'tag1', 'tag_2': 'tag2'}),
Document(
text='hello world',
embedding=[1, 2, 3],
tags={'tag_1': 'tag1', 'tag_2': 'tag2'},
chunks=[Document(text='token1'), Document(text='token2')],
),
],
)
def test_setgetdel_doc_by_id(doc, da_redis, start_storage):
da_redis._set_doc_by_id(doc.id, doc)
doc_get = da_redis._get_doc_by_id(doc.id)
assert doc == doc_get
da_redis._del_doc_by_id(doc.id)
with pytest.raises(KeyError):
da_redis._get_doc_by_id(doc.id)
def test_clear_storage(da_redis, start_storage):
for i in range(3):
doc = Document(id=str(i))
da_redis._set_doc_by_id(str(i), doc)
da_redis._clear_storage()
for i in range(3):
with pytest.raises(KeyError):
da_redis._get_doc_by_id(i)
def test_offset2ids(da_redis, start_storage):
ids = [str(i) for i in range(3)]
for id in ids:
doc = Document(id=id)
da_redis._set_doc_by_id(id, doc)
da_redis._offset2ids = Offset2ID(ids)
da_redis._save_offset2ids()
da_redis._load_offset2ids()
assert da_redis._offset2ids.ids == ids
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
offline_runners = []
cmd = [
"curl",
"-H",
"Accept: application/vnd.github+json",
"-H",
f"Authorization: Bearer {token}",
"https://api.github.com/repos/huggingface/transformers/actions/runners",
]
output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
o = output.stdout.decode("utf-8")
status = json.loads(o)
runners = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(runner)
# save the result so we can report them on Slack
with open("offline_runners.txt", "w") as fp:
fp.write(json.dumps(offline_runners))
if len(offline_runners) > 0:
failed = "\n".join([x["name"] for x in offline_runners])
raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
def list_str(values):
return values.split(",")
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
offline_runners = []
cmd = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
o = output.stdout.decode("utf-8")
status = json.loads(o)
runners = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(runner)
# save the result so we can report them on Slack
with open("offline_runners.txt", "w") as fp:
fp.write(json.dumps(offline_runners))
if len(offline_runners) > 0:
failed = "\n".join([x["name"] for x in offline_runners])
raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
def list_str(values):
return values.split(",")
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
|
"""Test prompt mixin."""
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.mixin import (
PromptDictType,
PromptMixin,
PromptMixinType,
)
class MockObject2(PromptMixin):
def __init__(self) -> None:
self._prompt_dict_2 = {
"abc": PromptTemplate("{abc} {def}"),
}
def _get_prompts(self) -> PromptDictType:
return self._prompt_dict_2
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
if "abc" in prompts:
self._prompt_dict_2["abc"] = prompts["abc"]
class MockObject1(PromptMixin):
def __init__(self) -> None:
self.mock_object_2 = MockObject2()
self._prompt_dict_1 = {
"summary": PromptTemplate("{summary}"),
"foo": PromptTemplate("{foo} {bar}"),
}
def _get_prompts(self) -> PromptDictType:
return self._prompt_dict_1
def _get_prompt_modules(self) -> PromptMixinType:
return {"mock_object_2": self.mock_object_2}
def _update_prompts(self, prompts: PromptDictType) -> None:
if "summary" in prompts:
self._prompt_dict_1["summary"] = prompts["summary"]
if "foo" in prompts:
self._prompt_dict_1["foo"] = prompts["foo"]
def test_prompt_mixin() -> None:
mock_obj1 = MockObject1()
prompts = mock_obj1.get_prompts()
assert prompts == {
"summary": PromptTemplate("{summary}"),
"foo": PromptTemplate("{foo} {bar}"),
"mock_object_2:abc": PromptTemplate("{abc} {def}"),
}
assert mock_obj1.mock_object_2.get_prompts() == {
"abc": PromptTemplate("{abc} {def}"),
}
# update prompts
mock_obj1.update_prompts(
{
"summary": PromptTemplate("{summary} testing"),
"mock_object_2:abc": PromptTemplate("{abc} {def} ghi"),
}
)
assert mock_obj1.get_prompts() == {
"summary": PromptTemplate("{summary} testing"),
"foo": PromptTemplate("{foo} {bar}"),
"mock_object_2:abc": PromptTemplate("{abc} {def} ghi"),
}
|
"""Test prompt mixin."""
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.mixin import (
PromptDictType,
PromptMixin,
PromptMixinType,
)
class MockObject2(PromptMixin):
def __init__(self) -> None:
self._prompt_dict_2 = {
"abc": PromptTemplate("{abc} {def}"),
}
def _get_prompts(self) -> PromptDictType:
return self._prompt_dict_2
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
if "abc" in prompts:
self._prompt_dict_2["abc"] = prompts["abc"]
class MockObject1(PromptMixin):
def __init__(self) -> None:
self.mock_object_2 = MockObject2()
self._prompt_dict_1 = {
"summary": PromptTemplate("{summary}"),
"foo": PromptTemplate("{foo} {bar}"),
}
def _get_prompts(self) -> PromptDictType:
return self._prompt_dict_1
def _get_prompt_modules(self) -> PromptMixinType:
return {"mock_object_2": self.mock_object_2}
def _update_prompts(self, prompts: PromptDictType) -> None:
if "summary" in prompts:
self._prompt_dict_1["summary"] = prompts["summary"]
if "foo" in prompts:
self._prompt_dict_1["foo"] = prompts["foo"]
def test_prompt_mixin() -> None:
mock_obj1 = MockObject1()
prompts = mock_obj1.get_prompts()
assert prompts == {
"summary": PromptTemplate("{summary}"),
"foo": PromptTemplate("{foo} {bar}"),
"mock_object_2:abc": PromptTemplate("{abc} {def}"),
}
assert mock_obj1.mock_object_2.get_prompts() == {
"abc": PromptTemplate("{abc} {def}"),
}
# update prompts
mock_obj1.update_prompts(
{
"summary": PromptTemplate("{summary} testing"),
"mock_object_2:abc": PromptTemplate("{abc} {def} ghi"),
}
)
assert mock_obj1.get_prompts() == {
"summary": PromptTemplate("{summary} testing"),
"foo": PromptTemplate("{foo} {bar}"),
"mock_object_2:abc": PromptTemplate("{abc} {def} ghi"),
}
|
"""[DEPRECATED] Pipeline prompt template."""
from typing import Any
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseChatPromptTemplate
def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
return {k: inputs[k] for k in input_variables}
@deprecated(
since="0.3.22",
removal="1.0",
message=(
"This class is deprecated. Please see the docstring below or at the link"
" for a replacement option: "
"https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html"
),
)
class PipelinePromptTemplate(BasePromptTemplate):
"""[DEPRECATED] Pipeline prompt template.
This has been deprecated in favor of chaining individual prompts together in your
code. E.g. using a for loop, you could do:
.. code-block:: python
my_input = {"key": "value"}
for name, prompt in pipeline_prompts:
my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (`name`) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as `name`
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: list[tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (`name`) and a Prompt Template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "pipeline"]
@model_validator(mode="before")
@classmethod
def get_input_variables(cls, values: dict) -> Any:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            A formatted PromptValue.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**_inputs)
else:
kwargs[k] = prompt.format(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**_inputs)
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            A formatted PromptValue.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = await prompt.aformat_messages(**_inputs)
else:
kwargs[k] = await prompt.aformat(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return await self.final_prompt.aformat_prompt(**_inputs)
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return self.format_prompt(**kwargs).to_string()
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return (await self.aformat_prompt(**kwargs)).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
PipelinePromptTemplate.model_rebuild()
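# --- Replacement sketch (illustrative, not part of this module) --------------
# The deprecation notice above recommends chaining prompts manually. A minimal
# version of that pattern, with made-up template texts, could look like this:
#
#   from langchain_core.prompts import PromptTemplate
#
#   final_prompt = PromptTemplate.from_template("{introduction}\n\n{question}")
#   pipeline_prompts = [
#       ("introduction", PromptTemplate.from_template("You are impersonating {person}.")),
#       ("question", PromptTemplate.from_template("Answer briefly: {query}")),
#   ]
#   my_input = {"person": "a pirate", "query": "What is your ship called?"}
#   for name, prompt in pipeline_prompts:
#       my_input[name] = prompt.invoke(my_input).to_string()
#   my_output = final_prompt.invoke(my_input)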
|
"""[DEPRECATED] Pipeline prompt template."""
from typing import Any
from typing import Optional as Optional
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseChatPromptTemplate
def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
return {k: inputs[k] for k in input_variables}
@deprecated(
since="0.3.22",
removal="1.0",
message=(
"This class is deprecated. Please see the docstring below or at the link"
" for a replacement option: "
"https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html"
),
)
class PipelinePromptTemplate(BasePromptTemplate):
"""[DEPRECATED] Pipeline prompt template.
This has been deprecated in favor of chaining individual prompts together in your
code. E.g. using a for loop, you could do:
.. code-block:: python
my_input = {"key": "value"}
for name, prompt in pipeline_prompts:
my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (`name`) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as `name`
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: list[tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (`name`) and a Prompt Template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "pipeline"]
@model_validator(mode="before")
@classmethod
def get_input_variables(cls, values: dict) -> Any:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            A formatted PromptValue.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**_inputs)
else:
kwargs[k] = prompt.format(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**_inputs)
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            A formatted PromptValue.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = await prompt.aformat_messages(**_inputs)
else:
kwargs[k] = await prompt.aformat(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return await self.final_prompt.aformat_prompt(**_inputs)
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return self.format_prompt(**kwargs).to_string()
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return (await self.aformat_prompt(**kwargs)).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
PipelinePromptTemplate.model_rebuild()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
from .test import multi_gpu_test, single_gpu_test
from .train import (get_root_logger, init_random_seed, set_random_seed,
train_detector)
__all__ = [
'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
'async_inference_detector', 'inference_detector', 'show_result_pyplot',
'multi_gpu_test', 'single_gpu_test', 'init_random_seed'
]
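# Typical usage of the inference helpers re-exported here (illustrative; the
# config and checkpoint paths are placeholders):
#   model = init_detector('configs/some_config.py', 'checkpoint.pth', device='cuda:0')
#   result = inference_detector(model, 'demo.jpg')
#   show_result_pyplot(model, 'demo.jpg', result)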
|
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
from .test import multi_gpu_test, single_gpu_test
from .train import get_root_logger, set_random_seed, train_detector
__all__ = [
'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
'async_inference_detector', 'inference_detector', 'show_result_pyplot',
'multi_gpu_test', 'single_gpu_test'
]
|
# Experimental features are not mature yet and are subject to change.
# We do not provide any BC/FC guarantees
|
# Experimental features are not mature yet and are subject to change.
# We do not provide any BC/FC guarantees
|
"""
This example loads the pre-trained SentenceTransformer model 'nli-distilroberta-base-v2' from the server.
It then fine-tunes this model for some epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Read the dataset
model_name = "nli-distilroberta-base-v2"
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/training_stsbenchmark_continue_training-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Load a pre-trained sentence transformer model
model = SentenceTransformer(model_name)
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
# Development set: Measure correlation between cosine score and gold labels
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. The dev evaluator above is run every `evaluation_steps` steps during training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
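# Optional follow-up (illustrative): the fine-tuned model reloaded above can be
# reused directly for encoding; the sentences are placeholders.
#   embeddings = model.encode(["A man is eating food.", "A man is playing guitar."])
#   print(util.cos_sim(embeddings[0], embeddings[1]))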
|
"""
This example loads the pre-trained SentenceTransformer model 'nli-distilroberta-base-v2' from the server.
It then fine-tunes this model for some epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Read the dataset
model_name = "nli-distilroberta-base-v2"
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/training_stsbenchmark_continue_training-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Load a pre-trained sentence transformer model
model = SentenceTransformer(model_name)
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
# Development set: Measure correlation between cosine score and gold labels
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. The dev evaluator above is run every `evaluation_steps` steps during training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
|
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import AnyUrl
def test_proto_any_url():
uri = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyUrl)
def test_dump_json():
url = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
|
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl
def test_proto_any_url():
uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, remove this pipeline step
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args={{_base_.backend_args}})
test_evaluator = val_evaluator
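# Illustrative launch command (assuming the usual MMDetection tools/ layout;
# the config path below is a placeholder for wherever this file is saved):
#   python tools/train.py <path/to/this_config.py>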
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, remove this pipeline step
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
import pytest
from fastapi import Depends, FastAPI, HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.testclient import TestClient
from starlette.responses import JSONResponse
def http_exception_handler(request, exception):
return JSONResponse({"exception": "http-exception"})
def request_validation_exception_handler(request, exception):
return JSONResponse({"exception": "request-validation"})
def server_error_exception_handler(request, exception):
return JSONResponse(status_code=500, content={"exception": "server-error"})
app = FastAPI(
exception_handlers={
HTTPException: http_exception_handler,
RequestValidationError: request_validation_exception_handler,
Exception: server_error_exception_handler,
}
)
client = TestClient(app)
def raise_value_error():
raise ValueError()
def dependency_with_yield():
yield raise_value_error()
@app.get("/dependency-with-yield", dependencies=[Depends(dependency_with_yield)])
def with_yield(): ...
@app.get("/http-exception")
def route_with_http_exception():
raise HTTPException(status_code=400)
@app.get("/request-validation/{param}/")
def route_with_request_validation_exception(param: int):
pass # pragma: no cover
@app.get("/server-error")
def route_with_server_error():
raise RuntimeError("Oops!")
def test_override_http_exception():
response = client.get("/http-exception")
assert response.status_code == 200
assert response.json() == {"exception": "http-exception"}
def test_override_request_validation_exception():
response = client.get("/request-validation/invalid")
assert response.status_code == 200
assert response.json() == {"exception": "request-validation"}
def test_override_server_error_exception_raises():
with pytest.raises(RuntimeError):
client.get("/server-error")
def test_override_server_error_exception_response():
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/server-error")
assert response.status_code == 500
assert response.json() == {"exception": "server-error"}
def test_traceback_for_dependency_with_yield():
client = TestClient(app, raise_server_exceptions=True)
with pytest.raises(ValueError) as exc_info:
client.get("/dependency-with-yield")
last_frame = exc_info.traceback[-1]
assert str(last_frame.path) == __file__
assert last_frame.lineno == raise_value_error.__code__.co_firstlineno
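# Note (illustrative): instead of passing exception_handlers to the FastAPI
# constructor, the same overrides can be registered with the decorator form:
#   @app.exception_handler(HTTPException)
#   def handle_http_exception(request, exc):
#       return JSONResponse({"exception": "http-exception"})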
|
import pytest
from fastapi import FastAPI, HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.testclient import TestClient
from starlette.responses import JSONResponse
def http_exception_handler(request, exception):
return JSONResponse({"exception": "http-exception"})
def request_validation_exception_handler(request, exception):
return JSONResponse({"exception": "request-validation"})
def server_error_exception_handler(request, exception):
return JSONResponse(status_code=500, content={"exception": "server-error"})
app = FastAPI(
exception_handlers={
HTTPException: http_exception_handler,
RequestValidationError: request_validation_exception_handler,
Exception: server_error_exception_handler,
}
)
client = TestClient(app)
@app.get("/http-exception")
def route_with_http_exception():
raise HTTPException(status_code=400)
@app.get("/request-validation/{param}/")
def route_with_request_validation_exception(param: int):
pass # pragma: no cover
@app.get("/server-error")
def route_with_server_error():
raise RuntimeError("Oops!")
def test_override_http_exception():
response = client.get("/http-exception")
assert response.status_code == 200
assert response.json() == {"exception": "http-exception"}
def test_override_request_validation_exception():
response = client.get("/request-validation/invalid")
assert response.status_code == 200
assert response.json() == {"exception": "request-validation"}
def test_override_server_error_exception_raises():
with pytest.raises(RuntimeError):
client.get("/server-error")
def test_override_server_error_exception_response():
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/server-error")
assert response.status_code == 500
assert response.json() == {"exception": "server-error"}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import (
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
get_stateful_documents,
)
from langchain_community.document_transformers.embeddings_redundant_filter import (
_DocumentWithState,
_filter_similar_embeddings,
_get_embeddings_from_stateful_docs,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"EmbeddingsRedundantFilter": "langchain_community.document_transformers",
"EmbeddingsClusteringFilter": "langchain_community.document_transformers",
"_DocumentWithState": (
"langchain_community.document_transformers.embeddings_redundant_filter"
),
"get_stateful_documents": "langchain_community.document_transformers",
"_get_embeddings_from_stateful_docs": (
"langchain_community.document_transformers.embeddings_redundant_filter"
),
"_filter_similar_embeddings": (
"langchain_community.document_transformers.embeddings_redundant_filter"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"EmbeddingsClusteringFilter",
"EmbeddingsRedundantFilter",
"_DocumentWithState",
"_filter_similar_embeddings",
"_get_embeddings_from_stateful_docs",
"get_stateful_documents",
]
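# Note (illustrative): names listed in DEPRECATED_LOOKUP are resolved lazily
# through the module-level __getattr__ above, which emits a deprecation warning
# and returns the langchain_community implementation, so legacy imports from
# this module keep working.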
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import (
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
get_stateful_documents,
)
from langchain_community.document_transformers.embeddings_redundant_filter import (
_DocumentWithState,
_filter_similar_embeddings,
_get_embeddings_from_stateful_docs,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"EmbeddingsRedundantFilter": "langchain_community.document_transformers",
"EmbeddingsClusteringFilter": "langchain_community.document_transformers",
"_DocumentWithState": (
"langchain_community.document_transformers.embeddings_redundant_filter"
),
"get_stateful_documents": "langchain_community.document_transformers",
"_get_embeddings_from_stateful_docs": (
"langchain_community.document_transformers.embeddings_redundant_filter"
),
"_filter_similar_embeddings": (
"langchain_community.document_transformers.embeddings_redundant_filter"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"EmbeddingsRedundantFilter",
"EmbeddingsClusteringFilter",
"_DocumentWithState",
"get_stateful_documents",
"_get_embeddings_from_stateful_docs",
"_filter_similar_embeddings",
]
|
"""load multiple Python files specified as command line arguments."""
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
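    # Note: SourceFileLoader.load_module() is deprecated; an equivalent check
    # using importlib.util (illustrative) would be:
    #   import importlib.util
    #   spec = importlib.util.spec_from_file_location("x", file)
    #   module = importlib.util.module_from_spec(spec)
    #   spec.loader.exec_module(module)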
|
"""load multiple Python files specified as command line arguments."""
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
import pytest
from llama_index.core.node_parser.text.semantic_double_merging_splitter import (
SemanticDoubleMergingSplitterNodeParser,
LanguageConfig,
)
from llama_index.core.schema import Document
doc = Document(
text="Warsaw: Warsaw, the capital city of Poland, is a bustling metropolis located on the banks of the Vistula River. "
"It is known for its rich history, vibrant culture, and resilient spirit. Warsaw's skyline is characterized by a mix of historic architecture and modern skyscrapers. "
"The Old Town, with its cobblestone streets and colorful buildings, is a UNESCO World Heritage Site.\n\n"
"Football: Football, also known as soccer, is a popular sport played by millions of people worldwide. "
"It is a team sport that involves two teams of eleven players each. The objective of the game is to score goals by kicking the ball into the opposing team's goal. "
"Football matches are typically played on a rectangular field called a pitch, with goals at each end. "
"The game is governed by a set of rules known as the Laws of the Game. Football is known for its passionate fanbase and intense rivalries between clubs and countries. "
"The FIFA World Cup is the most prestigious international football tournament.\n\n"
"Mathematics: Mathematics is a fundamental discipline that deals with the study of numbers, quantities, and shapes. "
"Its branches include algebra, calculus, geometry, and statistics."
)
try:
splitter = SemanticDoubleMergingSplitterNodeParser(
initial_threshold=0.7,
appending_threshold=0.8,
merging_threshold=0.7,
max_chunk_size=1000,
)
splitter.language_config.load_model()
spacy_available = True
except Exception:
spacy_available = False
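# The guard above assumes the spaCy model used by LanguageConfig (e.g.
# en_core_web_md, as referenced in the tests below) is installed locally; if it
# is not, it can typically be fetched with:  python -m spacy download en_core_web_md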
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_number_of_returned_nodes() -> None:
nodes = splitter.get_nodes_from_documents([doc])
assert len(nodes) == 4
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_creating_initial_chunks() -> None:
text = doc.text
sentences = splitter.sentence_splitter(text)
initial_chunks = splitter._create_initial_chunks(sentences)
assert len(initial_chunks) == 9
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_config_models() -> None:
with pytest.raises(ValueError):
LanguageConfig(language="polish")
with pytest.raises(ValueError):
LanguageConfig(language="polish", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="french", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="empty", spacy_model="empty")
LanguageConfig(language="english", spacy_model="en_core_web_md")
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_1() -> None:
splitter.max_chunk_size = 0
nodes = splitter.get_nodes_from_documents([doc])
# length of each sentence
assert len(nodes) == 13
assert len(nodes[0].get_content()) == 111
assert len(nodes[1].get_content()) == 72
assert len(nodes[2].get_content()) == 91
assert len(nodes[3].get_content()) == 99
assert len(nodes[4].get_content()) == 100
assert len(nodes[5].get_content()) == 66
assert len(nodes[6].get_content()) == 94
assert len(nodes[7].get_content()) == 100
assert len(nodes[8].get_content()) == 69
assert len(nodes[9].get_content()) == 95
assert len(nodes[10].get_content()) == 77
assert len(nodes[11].get_content()) == 114
assert len(nodes[12].get_content()) == 65
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_2() -> None:
splitter.max_chunk_size = 200
nodes = splitter.get_nodes_from_documents([doc])
assert len(nodes) == 9
assert len(nodes[0].get_content()) < 200
assert len(nodes[1].get_content()) < 200
assert len(nodes[2].get_content()) < 200
assert len(nodes[3].get_content()) < 200
assert len(nodes[4].get_content()) < 200
assert len(nodes[5].get_content()) < 200
assert len(nodes[6].get_content()) < 200
assert len(nodes[7].get_content()) < 200
assert len(nodes[8].get_content()) < 200
|
import pytest
from llama_index.core.node_parser.text.semantic_double_merging_splitter import (
SemanticDoubleMergingSplitterNodeParser,
LanguageConfig,
)
from llama_index.core.schema import Document
doc = Document(
text="Warsaw: Warsaw, the capital city of Poland, is a bustling metropolis located on the banks of the Vistula River. "
"It is known for its rich history, vibrant culture, and resilient spirit. Warsaw's skyline is characterized by a mix of historic architecture and modern skyscrapers. "
"The Old Town, with its cobblestone streets and colorful buildings, is a UNESCO World Heritage Site.\n\n"
"Football: Football, also known as soccer, is a popular sport played by millions of people worldwide. "
"It is a team sport that involves two teams of eleven players each. The objective of the game is to score goals by kicking the ball into the opposing team's goal. "
"Football matches are typically played on a rectangular field called a pitch, with goals at each end. "
"The game is governed by a set of rules known as the Laws of the Game. Football is known for its passionate fanbase and intense rivalries between clubs and countries. "
"The FIFA World Cup is the most prestigious international football tournament.\n\n"
"Mathematics: Mathematics is a fundamental discipline that deals with the study of numbers, quantities, and shapes. "
"Its branches include algebra, calculus, geometry, and statistics."
)
try:
splitter = SemanticDoubleMergingSplitterNodeParser(
initial_threshold=0.7,
appending_threshold=0.8,
merging_threshold=0.7,
max_chunk_size=1000,
)
splitter.language_config.load_model()
spacy_available = True
except Exception:
spacy_available = False
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_number_of_returned_nodes() -> None:
nodes = splitter.get_nodes_from_documents([doc])
assert len(nodes) == 3
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_creating_initial_chunks() -> None:
text = doc.text
sentences = splitter.sentence_splitter(text)
initial_chunks = splitter._create_initial_chunks(sentences)
assert len(initial_chunks) == 9
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_config_models() -> None:
with pytest.raises(ValueError):
LanguageConfig(language="polish")
with pytest.raises(ValueError):
LanguageConfig(language="polish", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="french", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="empty", spacy_model="empty")
LanguageConfig(language="english", spacy_model="en_core_web_md")
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_1() -> None:
splitter.max_chunk_size = 0
nodes = splitter.get_nodes_from_documents([doc])
# length of each sentence
assert len(nodes) == 13
assert len(nodes[0].get_content()) == 111
assert len(nodes[1].get_content()) == 72
assert len(nodes[2].get_content()) == 91
assert len(nodes[3].get_content()) == 99
assert len(nodes[4].get_content()) == 100
assert len(nodes[5].get_content()) == 66
assert len(nodes[6].get_content()) == 94
assert len(nodes[7].get_content()) == 100
assert len(nodes[8].get_content()) == 69
assert len(nodes[9].get_content()) == 95
assert len(nodes[10].get_content()) == 77
assert len(nodes[11].get_content()) == 114
assert len(nodes[12].get_content()) == 65
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_2() -> None:
splitter.max_chunk_size = 200
nodes = splitter.get_nodes_from_documents([doc])
assert len(nodes) == 9
assert len(nodes[0].get_content()) < 200
assert len(nodes[1].get_content()) < 200
assert len(nodes[2].get_content()) < 200
assert len(nodes[3].get_content()) < 200
assert len(nodes[4].get_content()) < 200
assert len(nodes[5].get_content()) < 200
assert len(nodes[6].get_content()) < 200
assert len(nodes[7].get_content()) < 200
assert len(nodes[8].get_content()) < 200
|
import inspect
from abc import ABC
from functools import reduce
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union
if TYPE_CHECKING:
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
class VersionedYAMLParser:
"""Flow YAML parser for specific version
Every :class:`VersionedYAMLParser` must implement two methods and one class attribute:
- :meth:`parse`: to load data (in :class:`dict`) into a :class:`BaseFlow` or :class:`BaseExecutor` object
- :meth:`dump`: to dump a :class:`BaseFlow` or :class:`BaseExecutor` object into a :class:`dict`
- :attr:`version`: version number in :class:`str` in format ``MAJOR.[MINOR]``
"""
version = 'legacy' #: the version number this parser designed for
def parse(
self, cls: type, data: Dict, runtime_args: Optional[Dict[str, Any]]
) -> Union['Flow', 'BaseExecutor']:
"""Return the Flow YAML parser given the syntax version number
.. # noqa: DAR401
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
"""
raise NotImplementedError
def dump(self, data: Union['Flow', 'BaseExecutor']) -> Dict:
"""Return the dictionary given a versioned flow object
.. # noqa: DAR401
:param data: versioned flow object
"""
raise NotImplementedError
class BaseLegacyParser(VersionedYAMLParser, ABC):
"""
    BaseLegacyParser for classes that need parameter injection and that will be managed inside a runtime,
    for instance :class:`BaseExecutor` and :class:`BaseGateway`.
"""
@staticmethod
def _get_all_arguments(class_):
"""
:param class_: target class from which we want to retrieve arguments
:return: all the arguments of all the classes from which `class_` inherits
"""
def get_class_arguments(class_):
"""
:param class_: the class to check
:return: a list containing the arguments from `class_`
"""
signature = inspect.signature(class_.__init__)
class_arguments = [p.name for p in signature.parameters.values()]
return class_arguments
def accumulate_classes(cls) -> Set[Type]:
"""
:param cls: the class to check
            :return: all classes from which cls inherits
"""
def _accumulate_classes(c, cs):
cs.append(c)
if cls == object:
return cs
for base in c.__bases__:
_accumulate_classes(base, cs)
return cs
classes = []
_accumulate_classes(cls, classes)
return set(classes)
all_classes = accumulate_classes(class_)
args = list(map(lambda x: get_class_arguments(x), all_classes))
return set(reduce(lambda x, y: x + y, args))
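# --- Minimal subclass sketch (illustrative, not part of jina) ----------------
# A concrete parser only needs to provide `version`, `parse` and `dump`; the
# 'with'/'jtype' keys below are placeholders for whatever the YAML schema uses:
#
#   class MyV1Parser(VersionedYAMLParser):
#       version = '1'
#
#       def parse(self, cls, data, runtime_args=None):
#           # build the target object from the already-loaded YAML dict
#           return cls(**{**data.get('with', {}), **(runtime_args or {})})
#
#       def dump(self, data):
#           return {'jtype': data.__class__.__name__, 'version': self.version}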
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
if TYPE_CHECKING:
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
class VersionedYAMLParser:
"""Flow YAML parser for specific version
Every :class:`VersionedYAMLParser` must implement two methods and one class attribute:
- :meth:`parse`: to load data (in :class:`dict`) into a :class:`BaseFlow` or :class:`BaseExecutor` object
- :meth:`dump`: to dump a :class:`BaseFlow` or :class:`BaseExecutor` object into a :class:`dict`
- :attr:`version`: version number in :class:`str` in format ``MAJOR.[MINOR]``
"""
version = 'legacy' #: the version number this parser designed for
def parse(
self, cls: type, data: Dict, runtime_args: Optional[Dict[str, Any]]
) -> Union['Flow', 'BaseExecutor']:
"""Return the Flow YAML parser given the syntax version number
.. # noqa: DAR401
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
"""
raise NotImplementedError
def dump(self, data: Union['Flow', 'BaseExecutor']) -> Dict:
"""Return the dictionary given a versioned flow object
.. # noqa: DAR401
:param data: versioned flow object
"""
raise NotImplementedError
|
import pytest
from docarray import Document, DocumentArray
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('deleted_elmnts', [[0, 1], ['r0', 'r1']])
@pytest.mark.parametrize('columns', [[('price', 'int')], {'price': 'int'}])
def test_delete_offset_success_sync_es_offset_index(
deleted_elmnts, start_storage, columns
):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'distance': 'l2_norm',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[5, 5, 5]),
Document(id='r6', embedding=[6, 6, 6]),
Document(id='r7', embedding=[7, 7, 7]),
]
)
expected_offset_after_del = ['r2', 'r3', 'r4', 'r5', 'r6', 'r7']
with elastic_doc:
del elastic_doc[deleted_elmnts]
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc._offset2ids.ids) == indexed_offset_count
assert len(elastic_doc._offset2ids.ids) == 6
assert len(elastic_doc[:, 'embedding']) == 6
for id in expected_offset_after_del:
expected_offset = str(expected_offset_after_del.index(id))
actual_offset_index = elastic_doc._client.search(
index=elastic_doc._index_name_offset2id, query={'match': {'blob': id}}
)['hits']['hits'][0]['_id']
assert actual_offset_index == expected_offset
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('columns', [[('price', 'int')], {'price': 'int'}])
def test_success_handle_bulk_delete_not_found(start_storage, columns):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'distance': 'l2_norm',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
]
)
offset_index = elastic_doc._index_name_offset2id
expected_to_be_fail_del_data = [
{
'_op_type': 'delete',
'_id': 0, # offset data exist
'_index': offset_index,
},
{
'_op_type': 'delete',
'_id': 2, # offset data not exist, expect to fail
'_index': offset_index,
},
]
info = elastic_doc._send_requests(expected_to_be_fail_del_data)
assert len(info) == 1
assert 'delete' in info[0].keys()
|
from docarray import Document, DocumentArray
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('deleted_elmnts', [[0, 1], ['r0', 'r1']])
def test_delete_offset_success_sync_es_offset_index(deleted_elmnts, start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_delete_offset_success_sync_es_offset_index',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[5, 5, 5]),
Document(id='r6', embedding=[6, 6, 6]),
Document(id='r7', embedding=[7, 7, 7]),
]
)
expected_offset_after_del = ['r2', 'r3', 'r4', 'r5', 'r6', 'r7']
with elastic_doc:
del elastic_doc[deleted_elmnts]
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc._offset2ids.ids) == indexed_offset_count
assert len(elastic_doc._offset2ids.ids) == 6
assert len(elastic_doc[:, 'embedding']) == 6
for id in expected_offset_after_del:
expected_offset = str(expected_offset_after_del.index(id))
actual_offset_index = elastic_doc._client.search(
index=elastic_doc._index_name_offset2id, query={'match': {'blob': id}}
)['hits']['hits'][0]['_id']
assert actual_offset_index == expected_offset
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_success_handle_bulk_delete_not_found(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_bulk_delete_not_found',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
]
)
offset_index = elastic_doc._index_name_offset2id
expected_to_be_fail_del_data = [
{
'_op_type': 'delete',
'_id': 0, # offset data exist
'_index': offset_index,
},
{
'_op_type': 'delete',
'_id': 2, # offset data not exist, expect to fail
'_index': offset_index,
},
]
info = elastic_doc._send_requests(expected_to_be_fail_del_data)
assert len(info) == 1
assert 'delete' in info[0].keys()
|
from abc import ABC, abstractmethod
import warnings
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING, Union, List, Tuple
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs,
):
self._load_offset2ids()
def _init_subindices(
self, _docs: Optional['DocumentArraySourceType'] = None, *args, **kwargs
):
self._subindices = {}
subindex_configs = kwargs.get('subindex_configs', None)
if subindex_configs:
config = asdict(self._config) if getattr(self, '_config', None) else dict()
for name, config_subindex in subindex_configs.items():
config_subindex = (
dict() if config_subindex is None else config_subindex
) # allow None as input
if is_dataclass(config_subindex):
config_subindex = asdict(config_subindex)
config_joined = {**config, **config_subindex}
config_joined = self._ensure_unique_config(
config, config_subindex, config_joined, name
)
self._subindices[name] = self.__class__(config=config_joined)
if _docs:
from docarray import DocumentArray
self._subindices[name].extend(
DocumentArray(_docs).traverse_flat(name[1:])
)
@abstractmethod
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
"""
Ensures that the subindex configuration is unique, despite it inheriting unpopulated fields from the root config.
:param config_root: The configuration of the root index.
:param config_subindex: The configuration that was explicitly provided by the user for the subindex.
:param config_joined: The configuration that combines root and subindex configs. This is the configuration that will be used for subindex construction.
:param subindex_name: Name (access path) of the subindex
:return: config_joined that is unique compared to config_root
"""
...
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(
self, columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]]
) -> Dict[str, str]:
if columns is None:
return {}
if isinstance(columns, list):
warnings.warn(
'Using "columns" as a List of Tuples will be deprecated soon. Please provide a Dictionary.'
)
columns = {col_desc[0]: col_desc[1] for col_desc in columns}
return columns
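# Example (illustrative) of the normalization performed above: both accepted
# column formats end up as a plain dict, e.g.
#   self._normalize_columns([('price', 'int')])  ->  {'price': 'int'}  (with a deprecation warning)
#   self._normalize_columns({'price': 'int'})    ->  {'price': 'int'}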
|
from abc import ABC, abstractmethod
import warnings
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING, Union, List, Tuple
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs,
):
self._load_offset2ids()
def _init_subindices(
self, _docs: Optional['DocumentArraySourceType'] = None, *args, **kwargs
):
self._subindices = {}
subindex_configs = kwargs.get('subindex_configs', None)
if subindex_configs:
config = asdict(self._config) if getattr(self, '_config', None) else dict()
for name, config_subindex in subindex_configs.items():
config_subindex = (
dict() if config_subindex is None else config_subindex
) # allow None as input
if is_dataclass(config_subindex):
config_subindex = asdict(config_subindex)
config_joined = {**config, **config_subindex}
config_joined = self._ensure_unique_config(
config, config_subindex, config_joined, name
)
self._subindices[name] = self.__class__(config=config_joined)
if _docs:
from docarray import DocumentArray
self._subindices[name].extend(
DocumentArray(_docs).traverse_flat(name[1:])
)
@abstractmethod
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
"""
Ensures that the subindex configuration is unique, despite it inheriting unpopulated fields from the root config.
:param config_root: The configuration of the root index.
:param config_subindex: The configuration that was explicitly provided by the user for the subindex.
:param config_joined: The configuration that combines root and subindex configs. This is the configuration that will be used for subindex construction.
:param subindex_name: Name (access path) of the subindex
:return: config_joined that is unique compared to config_root
"""
...
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(
self, columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]]
) -> Dict[str, str]:
if columns is None:
return {}
if isinstance(columns, list):
warnings.warn(
'Using "columns" as a List of Tuples will be deprecated soon. Please provide a Dictionary.'
)
columns = {col_desc[0]: col_desc[1] for col_desc in columns}
return columns
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def data_generator(test_dir: str):
def _generator():
data_file_path = os.path.join(test_dir, 'test_data', 'test_data.txt')
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray(
[Document(chunks=[Document(text='hello world') for _ in range(10)])]
)
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(chunks=[Document(text='hello world') for _ in range(10)])
]
)
]
)
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
        from docarray.documents import VideoDoc
        # use it directly
        vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDocument):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
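# Illustrative sketch (added; not part of the original module): because
# `validate` accepts plain strings and raw tensors, a VideoDoc can be built
# from either form without naming the target field. The URL below is a toy value.
if __name__ == "__main__":
    coerced = VideoDoc.validate('https://example.com/clip.mp4')  # wrapped as VideoDoc(url=...)
    print(coerced.url)
    # A raw numpy/torch/tf tensor would be wrapped as VideoDoc(tensor=...) instead.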
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDocument):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
"""Test EvalQueryEngine tool."""
from typing import Optional, Sequence, Any
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock
from llama_index.core.evaluation import EvaluationResult
from llama_index.core.evaluation.base import BaseEvaluator
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.query_engine.custom import CustomQueryEngine
from llama_index.core.response import Response
from llama_index.core.tools.eval_query_engine import EvalQueryEngineTool
from llama_index.core.tools.types import ToolOutput
class MockEvaluator(BaseEvaluator):
"""Mock Evaluator for testing purposes."""
def _get_prompts(self) -> PromptDictType: ...
def _update_prompts(self, prompts_dict: PromptDictType) -> None: ...
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult: ...
class MockQueryEngine(CustomQueryEngine):
"""Custom query engine."""
def custom_query(self, query_str: str) -> str:
"""Query."""
return "custom_" + query_str
class TestEvalQueryEngineTool(IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.mock_evaluator = MockEvaluator()
self.mock_evaluator.aevaluate = AsyncMock()
self.mock_evaluator.aevaluate.return_value = EvaluationResult(passing=True)
tool_name = "nice_tool"
self.tool_input = "hello world"
self.expected_content = f"custom_{self.tool_input}"
self.expected_tool_output = ToolOutput(
content=self.expected_content,
raw_input={"input": self.tool_input},
raw_output=Response(
response=self.expected_content,
source_nodes=[],
),
tool_name=tool_name,
)
self.eval_query_engine_tool = EvalQueryEngineTool.from_defaults(
MockQueryEngine(), evaluator=self.mock_evaluator, name=tool_name
)
def test_eval_query_engine_tool_with_eval_passing(self) -> None:
"""Test eval query engine tool with evaluation passing."""
tool_output = self.eval_query_engine_tool(self.tool_input)
self.assertEqual(self.expected_tool_output, tool_output)
def test_eval_query_engine_tool_with_eval_failing(self) -> None:
"""Test eval query engine tool with evaluation failing."""
evaluation_feedback = "The context does not provide a relevant answer."
self.mock_evaluator.aevaluate.return_value = EvaluationResult(
passing=False, feedback=evaluation_feedback
)
self.expected_tool_output.content = (
"Could not use tool nice_tool because it failed evaluation.\n"
f"Reason: {evaluation_feedback}"
)
tool_output = self.eval_query_engine_tool(self.tool_input)
self.assertEqual(self.expected_tool_output, tool_output)
|
"""Test EvalQueryEngine tool."""
from typing import Optional, Sequence, Any
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock
from llama_index.core.evaluation import EvaluationResult
from llama_index.core.evaluation.base import BaseEvaluator
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.query_engine.custom import CustomQueryEngine
from llama_index.core.response import Response
from llama_index.core.tools.eval_query_engine import EvalQueryEngineTool
from llama_index.core.tools.types import ToolOutput
class MockEvaluator(BaseEvaluator):
"""Mock Evaluator for testing purposes."""
def _get_prompts(self) -> PromptDictType:
...
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
...
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
...
class MockQueryEngine(CustomQueryEngine):
"""Custom query engine."""
def custom_query(self, query_str: str) -> str:
"""Query."""
return "custom_" + query_str
class TestEvalQueryEngineTool(IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.mock_evaluator = MockEvaluator()
self.mock_evaluator.aevaluate = AsyncMock()
self.mock_evaluator.aevaluate.return_value = EvaluationResult(passing=True)
tool_name = "nice_tool"
self.tool_input = "hello world"
self.expected_content = f"custom_{self.tool_input}"
self.expected_tool_output = ToolOutput(
content=self.expected_content,
raw_input={"input": self.tool_input},
raw_output=Response(
response=self.expected_content,
source_nodes=[],
),
tool_name=tool_name,
)
self.eval_query_engine_tool = EvalQueryEngineTool.from_defaults(
MockQueryEngine(), evaluator=self.mock_evaluator, name=tool_name
)
def test_eval_query_engine_tool_with_eval_passing(self) -> None:
"""Test eval query engine tool with evaluation passing."""
tool_output = self.eval_query_engine_tool(self.tool_input)
self.assertEqual(self.expected_tool_output, tool_output)
def test_eval_query_engine_tool_with_eval_failing(self) -> None:
"""Test eval query engine tool with evaluation failing."""
evaluation_feedback = "The context does not provide a relevant answer."
self.mock_evaluator.aevaluate.return_value = EvaluationResult(
passing=False, feedback=evaluation_feedback
)
self.expected_tool_output.content = (
"Could not use tool nice_tool because it failed evaluation.\n"
f"Reason: {evaluation_feedback}"
)
tool_output = self.eval_query_engine_tool(self.tool_input)
self.assertEqual(self.expected_tool_output, tool_output)
|
import fastapi
from .config import settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(auth_middleware),
) -> User:
return verify_user(payload, admin_only=True)
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)
# This handles the case when authentication is disabled
payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
if admin_only and payload["role"] != "admin":
raise fastapi.HTTPException(status_code=403, detail="Admin access required")
return User.from_payload(payload)
def get_user_id(payload: dict = fastapi.Depends(auth_middleware)) -> str:
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
return user_id
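# Minimal wiring sketch (added; the app and routes below are hypothetical and
# only illustrate how the dependency helpers above are meant to be plugged into
# FastAPI endpoints via Depends).
if __name__ == "__main__":
    app = fastapi.FastAPI()

    @app.get("/me")
    def read_me(user: User = fastapi.Depends(requires_user)) -> dict:
        # `requires_user` has already validated the token payload at this point.
        return {"user": str(user)}

    @app.delete("/users/{user_id}")
    def remove_user(
        user_id: str, admin: User = fastapi.Depends(requires_admin_user)
    ) -> dict:
        # Only reachable when the caller's payload carries role == "admin".
        return {"deleted": user_id}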
|
import fastapi
from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(auth_middleware),
) -> User:
return verify_user(payload, admin_only=True)
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if Settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)
# This handles the case when authentication is disabled
payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
if admin_only and payload["role"] != "admin":
raise fastapi.HTTPException(status_code=403, detail="Admin access required")
return User.from_payload(payload)
def get_user_id(payload: dict = fastapi.Depends(auth_middleware)) -> str:
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
return user_id
|
import asyncio
import os
from typing import Dict, List
import pytest
import requests
from jina import Flow
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
from tests.k8s_otel.util import get_last_health_check_data, parse_string_jaeger_tags
@pytest.mark.asyncio
@pytest.mark.timeout(1800)
async def test_flow_resource_labeling(
tmpdir, otel_test_namespace: str, k8s_cluster_v2: KindClusterWrapperV2
):
NAMESPACE = 'test-flow-resource-labeling'
dump_path = os.path.join(tmpdir, NAMESPACE)
logger = JinaLogger(NAMESPACE)
# Create k8s flow artifacts
flow = Flow(
name='test-flow-metrics',
port=8080,
metrics=True,
metrics_exporter_host=f'otel-collector.{otel_test_namespace}.svc.cluster.local',
metrics_exporter_port=4317,
tracing=True,
traces_exporter_host=f'jaeger.{otel_test_namespace}.svc.cluster.local',
traces_exporter_port=4317,
).add(
name='instrumentation',
uses='docker://test-instrumentation:test-pip',
)
flow.to_kubernetes_yaml(dump_path, k8s_namespace=NAMESPACE)
# Deploy flow
k8s_cluster_v2.deploy_from_dir(dir=dump_path, namespace=NAMESPACE)
# Make client requests
with k8s_cluster_v2.port_forward(
'svc/gateway', NAMESPACE, svc_port=8080
) as gateway_port:
from jina import Client
[docs async for docs in Client(port=gateway_port, asyncio=True).post("/")]
# Give grace period for metrics and traces to be exported
await asyncio.sleep(60)
# Check Jaeger API
with k8s_cluster_v2.port_forward(
'svc/jaeger', otel_test_namespace, svc_port=16686
) as jaeger_port:
try:
# Gateway
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='gateway'
)
assert trace_data['processes']['p1']['serviceName'] == 'gateway'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'gateway'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('gateway-')
# Instrumentation Executor
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='instrumentation'
)
assert trace_data['processes']['p1']['serviceName'] == 'instrumentation'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'instrumentation'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('instrumentation-')
except AssertionError as e:
logger.error(trace_data)
raise e
with k8s_cluster_v2.port_forward(
'svc/prometheus', otel_test_namespace, svc_port=9090
) as prometheus_port:
try:
# Check Prometheus Labels
prometheus_labels: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/labels'
).json()['data']
assert 'k8s_deployment_name' in prometheus_labels
assert 'k8s_namespace_name' in prometheus_labels
assert 'k8s_pod_name' in prometheus_labels
except AssertionError as e:
logger.error(prometheus_labels)
raise e
try:
depl_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_deployment_name/values'
).json()['data']
assert 'gateway' in depl_values
assert 'instrumentation' in depl_values
except AssertionError as e:
logger.error(depl_values)
raise e
try:
ns_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_namespace_name/values'
).json()['data']
assert NAMESPACE in ns_values
except AssertionError as e:
logger.error(ns_values)
raise e
try:
pod_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_pod_name/values'
).json()['data']
assert any(i.startswith('gateway-') for i in pod_values)
assert any(i.startswith('instrumentation-') for i in pod_values)
except AssertionError as e:
logger.error(pod_values)
raise e
|
import pytest
import os
import requests
import asyncio
from typing import List, Dict
from jina.logging.logger import JinaLogger
from jina import Flow
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
from tests.k8s_otel.util import parse_string_jaeger_tags, get_last_health_check_data
@pytest.mark.asyncio
@pytest.mark.timeout(1800)
async def test_flow_resource_labeling(tmpdir, otel_test_namespace: str, k8s_cluster_v2: KindClusterWrapperV2):
NAMESPACE = 'test-flow-resource-labeling'
dump_path = os.path.join(tmpdir, NAMESPACE)
logger = JinaLogger(NAMESPACE)
# Create k8s flow artifacts
flow = Flow(
name='test-flow-metrics',
port=8080,
metrics=True,
metrics_exporter_host=f'otel-collector.{otel_test_namespace}.svc.cluster.local',
metrics_exporter_port=4317,
tracing=True,
traces_exporter_host=f'jaeger.{otel_test_namespace}.svc.cluster.local',
traces_exporter_port=4317,
).add(
name='instrumentation',
uses='docker://test-instrumentation:test-pip',
)
flow.to_kubernetes_yaml(dump_path, k8s_namespace=NAMESPACE)
# Deploy flow
k8s_cluster_v2.deploy_from_dir(dir=dump_path, namespace=NAMESPACE)
# Make client requests
with k8s_cluster_v2.port_forward('svc/gateway', NAMESPACE, svc_port=8080) as gateway_port:
from jina import Client
[docs async for docs in Client(port=gateway_port, asyncio=True).post("/")]
# Give grace period for metrics and traces to be exported
await asyncio.sleep(60)
# Check Jaeger API
with k8s_cluster_v2.port_forward('svc/jaeger', otel_test_namespace, svc_port=16686) as jaeger_port:
try:
# Gateway
trace_data = get_last_health_check_data(jaeger_port=jaeger_port, service_name='gateway')
assert trace_data['processes']['p1']['serviceName'] == 'gateway'
tags: Dict[str, str] = parse_string_jaeger_tags(trace_data['processes']['p1']['tags'])
assert tags['k8s.deployment.name'] == 'gateway'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('gateway-')
# Instrumentation Executor
trace_data = get_last_health_check_data(jaeger_port=jaeger_port, service_name='instrumentation')
assert trace_data['processes']['p1']['serviceName'] == 'instrumentation'
tags: Dict[str, str] = parse_string_jaeger_tags(trace_data['processes']['p1']['tags'])
assert tags['k8s.deployment.name'] == 'instrumentation'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('instrumentation-')
except AssertionError as e:
logger.error(trace_data)
raise e
with k8s_cluster_v2.port_forward('svc/prometheus', otel_test_namespace, svc_port=9090) as prometheus_port:
try:
# Check Prometheus Labels
prometheus_labels: List[str] = requests.get(f'http://localhost:{prometheus_port}/api/v1/labels').json()['data']
assert 'k8s_deployment_name' in prometheus_labels
assert 'k8s_namespace_name' in prometheus_labels
assert 'k8s_pod_name' in prometheus_labels
except AssertionError as e:
logger.error(prometheus_labels)
raise e
try:
depl_values: List[str] = requests.get(f'http://localhost:{prometheus_port}/api/v1/label/k8s_deployment_name/values').json()['data']
assert 'gateway' in depl_values
assert 'instrumentation' in depl_values
except AssertionError as e:
logger.error(depl_values)
raise e
try:
ns_values: List[str] = requests.get(f'http://localhost:{prometheus_port}/api/v1/label/k8s_namespace_name/values').json()['data']
assert NAMESPACE in ns_values
except AssertionError as e:
logger.error(ns_values)
raise e
try:
pod_values: List[str] = requests.get(f'http://localhost:{prometheus_port}/api/v1/label/k8s_pod_name/values').json()['data']
assert any(i.startswith('gateway-') for i in pod_values)
assert any(i.startswith('instrumentation-') for i in pod_values)
except AssertionError as e:
logger.error(pod_values)
raise e
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"image": Image()})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
image_column: str = "image"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
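# Illustrative sketch (added; the feature names are toy values, not from the
# original module): aligning the template with a dataset's features replaces
# the generic ClassLabel in `label_schema` with the dataset's own label feature.
if __name__ == "__main__":
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    task = ImageClassification(image_column="image", label_column="labels")
    task = task.align_with_features(features)
    print(task.label_schema["labels"].names)  # ['cat', 'dog']
    print(task.column_mapping)  # {'image': 'image', 'labels': 'labels'}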
|
import copy
from dataclasses import dataclass
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
task: str = "image-classification"
input_schema: ClassVar[Features] = Features({"image": Image()})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
image_column: str = "image"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead,
MaskIoUHead)
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
# test dynamic_mask_head
dummy_proposal_feats = torch.rand(num_sampled, 8)
dynamic_mask_head = DynamicMaskHead(
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=8,
feat_channels=8,
out_channels=8,
input_feat_shape=6,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
num_convs=1,
num_classes=8,
in_channels=8,
roi_feat_size=6)
mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats)
mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks,
train_cfg)
loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels)
loss_mask = loss_mask['loss_mask'].sum()
assert loss_mask.item() >= 0
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import FCNMaskHead, MaskIoUHead
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
from .squim import SQUIM_OBJECTIVE, squim_objective_base, squim_objective_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
"squim_objective_base",
"squim_objective_model",
"SQUIM_OBJECTIVE",
]
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=None,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
tuple[list]: A tuple of features from ``bbox_head`` forward.
"""
x = self.extract_feat(batch_inputs)
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
results = self.bbox_head.forward(x, batch_img_metas)
return results
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=None,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
import numpy as np
import pytest
from docarray.documents import PointCloud3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensor = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensor, np.ndarray)
|
import numpy as np
import pytest
from docarray import PointCloud3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensor = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensor, np.ndarray)
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for ViT."""
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
)
from ...utils import (
auto_docstring,
)
@auto_docstring
class ViTImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
do_resize = True
do_rescale = True
do_normalize = True
__all__ = ["ViTImageProcessorFast"]
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for ViT."""
from ...image_processing_utils_fast import (
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
BaseImageProcessorFast,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
)
from ...utils import (
add_start_docstrings,
)
@add_start_docstrings(
"Constructs a fast ViT image processor.",
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class ViTImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
do_resize = True
do_rescale = True
do_normalize = True
__all__ = ["ViTImageProcessorFast"]
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking.utils import send_request_sync
from jina_cli.api import executor_native, gateway
from tests.helper import _generate_pod_args
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
send_request_sync(_create_test_data_message(), target=f'{args.host}:{args.port}')
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking import GrpcConnectionPool
from jina_cli.api import executor_native, gateway
from tests.helper import _generate_pod_args
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
GrpcConnectionPool.send_request_sync(
_create_test_data_message(), target=f'{args.host}:{args.port}'
)
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.data import InstanceData
from mmdet.core.bbox.assigners import AssignResult
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors, points, or bboxes predicted by the model,
shape(n, 4).
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags)
return sampling_result
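# Usage sketch (added for illustration; `SomeAssigner` is hypothetical and the
# tensors are toy values -- field names follow the docstring above):
#
#     pred_instances = InstanceData(priors=torch.rand(5, 4))
#     gt_instances = InstanceData(bboxes=torch.rand(2, 4), labels=torch.tensor([0, 1]))
#     assign_result = SomeAssigner().assign(pred_instances, gt_instances)
#     result = PseudoSampler().sample(assign_result, pred_instances, gt_instances)
#     # result.pos_inds / result.neg_inds index into pred_instances.priors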
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Assigned results
bboxes (torch.Tensor): Bounding boxes
gt_bboxes (torch.Tensor): Ground truth boxes
Returns:
:obj:`SamplingResult`: sampler results
"""
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
N = 100
def da_and_dam():
da = DocumentArray.empty(N)
dasq = DocumentArraySqlite.empty(N)
return (da, dasq)
@pytest.fixture
def docs():
yield (Document(text=str(j)) for j in range(100))
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=1)),
],
)
def test_iter_len_bool(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
j = 0
for _ in da:
j += 1
assert j == N
assert j == len(da)
assert da
da.clear()
assert not da
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_repr(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
assert f'length={N}' in repr(da)
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('sqlite', None),
('annlite', AnnliteConfig(n_dim=128)),
('weaviate', WeaviateConfig(n_dim=128)),
('qdrant', QdrantConfig(n_dim=128)),
('elasticsearch', ElasticConfig(n_dim=128)),
('redis', RedisConfig(n_dim=128)),
],
)
def test_repr_str(docs, storage, config, start_storage):
if config:
da = DocumentArray(docs, storage=storage, config=config)
else:
da = DocumentArray(docs, storage=storage)
da.summary()
assert da
da.clear()
assert not da
print(da)
@pytest.mark.parametrize(
'da_cls, config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=10)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=10)),
(DocumentArrayQdrant, QdrantConfig(n_dim=10)),
(DocumentArrayElastic, ElasticConfig(n_dim=10)),
(DocumentArrayRedis, RedisConfig(n_dim=10)),
],
)
def test_iadd(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
oid = id(da)
dap = DocumentArray.empty(10)
da += dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid == oid
@pytest.mark.parametrize('da', [da_and_dam()[0]])
def test_add(da):
oid = id(da)
dap = DocumentArray.empty(10)
da = da + dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid != oid
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
N = 100
def da_and_dam():
da = DocumentArray.empty(N)
dasq = DocumentArraySqlite.empty(N)
return (da, dasq)
@pytest.fixture
def docs():
yield (Document(text=str(j)) for j in range(100))
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=1, flush=True)),
],
)
def test_iter_len_bool(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
j = 0
for _ in da:
j += 1
assert j == N
assert j == len(da)
assert da
da.clear()
assert not da
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_repr(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
assert f'length={N}' in repr(da)
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('sqlite', None),
('annlite', AnnliteConfig(n_dim=128)),
('weaviate', WeaviateConfig(n_dim=128)),
('qdrant', QdrantConfig(n_dim=128)),
('elasticsearch', ElasticConfig(n_dim=128)),
('redis', RedisConfig(n_dim=128, flush=True)),
],
)
def test_repr_str(docs, storage, config, start_storage):
if config:
da = DocumentArray(docs, storage=storage, config=config)
else:
da = DocumentArray(docs, storage=storage)
da.summary()
assert da
da.clear()
assert not da
print(da)
@pytest.mark.parametrize(
'da_cls, config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=10)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=10)),
(DocumentArrayQdrant, QdrantConfig(n_dim=10)),
(DocumentArrayElastic, ElasticConfig(n_dim=10)),
(DocumentArrayRedis, RedisConfig(n_dim=10, flush=True)),
],
)
def test_iadd(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
oid = id(da)
dap = DocumentArray.empty(10)
da += dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid == oid
@pytest.mark.parametrize('da', [da_and_dam()[0]])
def test_add(da):
oid = id(da)
dap = DocumentArray.empty(10)
da = da + dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid != oid
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
# TODO: mstest is not currently implemented
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[190],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
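# Worked example (added note, not in the original config): when automatic LR
# scaling is switched on (e.g. via the `--auto-scale-lr` train flag), the
# optimizer lr is scaled linearly by actual_total_batch_size / base_batch_size.
# Training on 8 GPUs with 6 samples each gives 48 total, so lr would become
# 0.0005 * 48 / 96 = 0.00025.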
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=6,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[190])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
# .basic is intentionally loaded as early as possible, to dlopen() lib_lightgbm.{dll,dylib,so}
# and its dependencies as early as possible
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / "VERSION.txt"
if _version_path.is_file():
__version__ = _version_path.read_text(encoding="utf-8").strip()
__all__ = [
"Dataset",
"Booster",
"CVBooster",
"Sequence",
"register_logger",
"train",
"cv",
"LGBMModel",
"LGBMRegressor",
"LGBMClassifier",
"LGBMRanker",
"DaskLGBMRegressor",
"DaskLGBMClassifier",
"DaskLGBMRanker",
"log_evaluation",
"record_evaluation",
"reset_parameter",
"early_stopping",
"EarlyStopException",
"plot_importance",
"plot_split_value_histogram",
"plot_metric",
"plot_tree",
"create_tree_digraph",
]
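# Usage note (added; illustrative only): because the sklearn, plotting and dask
# imports above are wrapped in try/except, optional components can be probed at
# runtime instead of assuming they imported:
#
#     import lightgbm as lgb
#     has_dask = hasattr(lgb, "DaskLGBMRegressor")  # False unless the dask extras are installed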
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / "VERSION.txt"
if _version_path.is_file():
__version__ = _version_path.read_text(encoding="utf-8").strip()
__all__ = [
"Dataset",
"Booster",
"CVBooster",
"Sequence",
"register_logger",
"train",
"cv",
"LGBMModel",
"LGBMRegressor",
"LGBMClassifier",
"LGBMRanker",
"DaskLGBMRegressor",
"DaskLGBMClassifier",
"DaskLGBMRanker",
"log_evaluation",
"record_evaluation",
"reset_parameter",
"early_stopping",
"EarlyStopException",
"plot_importance",
"plot_split_value_histogram",
"plot_metric",
"plot_tree",
"create_tree_digraph",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg: The ground truth of semantic segmentation with
the shape (N, H, W).
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, gt_semantic_seg):
output = self.forward(x)
seg_preds = output['seg_preds']
return self.loss(seg_preds, gt_semantic_seg)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg, label_bias=1):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg: The ground truth of semantic segmentation with
the shape (N, H, W).
label_bias: The starting number of the semantic label.
Default: 1.
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
# make the semantic label start from 0
gt_semantic_seg = gt_semantic_seg - label_bias
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, gt_semantic_seg, label_bias=1):
output = self.forward(x)
seg_preds = output['seg_preds']
return self.loss(seg_preds, gt_semantic_seg, label_bias)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
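# ---------------------------------------------------------------------------
# Editorial standalone sketch (not part of the original module): the same
# flatten-and-shift logic that ``loss`` applies above, written in plain
# PyTorch so it runs outside the mmdet registry. The shapes, label_bias and
# ignore_index values are illustrative assumptions mirroring the defaults.
def _semantic_loss_sketch():
    import torch
    import torch.nn.functional as F

    N, C, H, W, label_bias = 2, 4, 8, 8, 1
    seg_preds = torch.randn(N, C, H, W)               # logits (N, C, H, W)
    gt_semantic_seg = torch.randint(0, C, (N, H, W)) + label_bias

    seg_preds = seg_preds.permute(0, 2, 3, 1)         # (N, H, W, C)
    gt = gt_semantic_seg - label_bias                 # labels start from 0
    return F.cross_entropy(
        seg_preds.reshape(-1, C),                     # => [N*H*W, C]
        gt.reshape(-1).long(),
        ignore_index=-1)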
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestRPN(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
self.assertEqual(detector.bbox_head.num_classes, 1)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
self.assertIsInstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestRPN(TestCase):
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.device.type == 'cpu'
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
assert detector.rpn_head.num_classes == 1
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
assert len(out) == 2
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
            assert isinstance(batch_results[0], DetDataSample)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../../examples/sentence_transformer/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../../examples/sentence_transformer/training/distillation/README.html>`_
- `Training > Multilingual Models <../../../examples/sentence_transformer/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|