input (string, lengths 33-5k) | output (string, lengths 32-5k) |
---|---|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['PALETTE']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['PALETTE']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['PALETTE']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['PALETTE']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``min_area``, the scale is 0.5; when the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
def jitter_color(color: tuple) -> tuple:
"""Randomly jitter the given color in order to better distinguish instances
with the same class.
Args:
color (tuple): The RGB color tuple. Each value is between [0, 255].
Returns:
tuple: The jittered color tuple.
"""
jitter = np.random.rand(3)
jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255
color = np.clip(jitter + color, 0, 255).astype(np.uint8)
return tuple(color)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['PALETTE']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['PALETTE']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['PALETTE']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['PALETTE']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``min_area``, the scale is 0.5; when the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
|
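A minimal usage sketch of the palette helpers above, assuming only this file's functions and numpy; the class count and areas are made-up illustration values::

    # Reproducible random palette for 3 classes, then converted for matplotlib.
    colors = get_palette('random', 3)        # list of 3 RGB tuples in [0, 255]
    mpl_colors = palette_val(colors)         # same colors rescaled to [0, 1] floats

    # Label-text scaling by bbox area: small boxes get 0.5, large boxes get 1.0.
    areas = np.array([500, 5000, 50000])
    scales = _get_adaptive_scales(areas)     # -> array([0.5, ~0.64, 1.0])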
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
This loss function works well for training embeddings in retrieval setups where you have positive pairs
(e.g. (query, answer)), as it will randomly sample ``n-1`` negative docs in each batch.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), document_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
This loss function works well for training embeddings in retrieval setups where you have positive pairs
(e.g. (query, answer)), as it will randomly sample ``n-1`` negative docs in each batch.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
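The docstring above describes in-batch negatives: each anchor is scored against every positive in the batch and trained with cross-entropy toward its own positive. A standalone sketch of that objective in plain PyTorch (random embeddings stand in for real model outputs; this is not the library's internal implementation)::

    import torch
    import torch.nn.functional as F

    batch = 4
    anchors = torch.randn(batch, 8)        # anchor embeddings (e.g. queries)
    positives = torch.randn(batch, 8)      # matching positive embeddings (e.g. answers)

    scale = 1.0
    scores = scale * anchors @ positives.T     # (batch, batch) similarity matrix
    labels = torch.arange(batch)               # diagonal entries hold the true pairs
    loss = F.cross_entropy(scores, labels)     # off-diagonal entries act as in-batch negatives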
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDocument):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._proto_type_name),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDocument):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._PROTO_FIELD_NAME),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._PROTO_FIELD_NAME),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert str(proto).startswith(proto_key)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_cosmos import AutoencoderKLCosmos
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_magvit import AutoencoderKLMagvit
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_kl_wan import AutoencoderKLWan
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_magvit import AutoencoderKLMagvit
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_kl_wan import AutoencoderKLWan
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone: Union[ConfigDict, dict],
rpn_head: Union[ConfigDict, dict],
roi_head: Union[ConfigDict, dict],
train_cfg: Union[ConfigDict, dict],
test_cfg: Union[ConfigDict, dict],
neck: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
preprocess_cfg: Optional[Union[ConfigDict, dict]] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from numpy import ndarray
from torch import Tensor
from mmdet.core.bbox.assigners import AssignResult
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
neg_pos_ub (int): Upper bound on the number of negative samples
relative to the number of positive samples. Defaults to -1.
add_gt_as_proposals (bool): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num: int,
pos_fraction: float,
neg_pos_ub: int = -1,
add_gt_as_proposals: bool = True,
**kwargs):
from mmdet.core.bbox import demodata
super().__init__(
num=num,
pos_fraction=pos_fraction,
neg_pos_ub=neg_pos_ub,
add_gt_as_proposals=add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery: Union[Tensor, ndarray, list],
num: int) -> Union[Tensor, ndarray]:
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some positive samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some negative samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected negative samples
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from numpy import ndarray
from torch import Tensor
from mmdet.core.bbox.assigners import AssignResult
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
neg_pos_ub (int): Upper bound on the number of negative samples
relative to the number of positive samples. Defaults to -1.
add_gt_as_proposals (bool): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num: int,
pos_fraction: float,
neg_pos_ub: int = -1,
add_gt_as_proposals: bool = True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery: Union[Tensor, ndarray, list],
num: int) -> Union[Tensor, ndarray]:
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some positive samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some negative samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected negative samples
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
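A standalone sketch of the positive/negative index sampling performed above, using only torch; the `gt_inds` values are made up (> 0 marks an assigned ground truth, 0 marks background)::

    import torch

    gt_inds = torch.tensor([2, 0, 0, 1, 0, 3, 0, 0])   # toy assignment result
    num_expected = 2

    pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(1)
    neg_inds = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(1)

    # Keep at most `num_expected` of each, mirroring random_choice above.
    pos_sample = pos_inds[torch.randperm(pos_inds.numel())[:num_expected]]
    neg_sample = neg_inds[torch.randperm(neg_inds.numel())[:num_expected]]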
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from langchain_huggingface.chat_models.huggingface import ( # type: ignore[import-not-found]
TGI_MESSAGE,
TGI_RESPONSE,
ChatHuggingFace,
_convert_dict_to_message,
)
__all__ = ["TGI_MESSAGE", "TGI_RESPONSE", "ChatHuggingFace", "_convert_dict_to_message"]
|
from langchain_huggingface.chat_models.huggingface import ( # type: ignore[import-not-found]
TGI_MESSAGE,
TGI_RESPONSE,
ChatHuggingFace,
_convert_dict_to_message,
)
__all__ = ["ChatHuggingFace", "_convert_dict_to_message", "TGI_MESSAGE", "TGI_RESPONSE"]
|
import re
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class CodeExtractionBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="Text containing code blocks to extract (e.g., AI response)",
placeholder="Enter text containing code blocks",
)
class Output(BlockSchema):
html: str = SchemaField(description="Extracted HTML code")
css: str = SchemaField(description="Extracted CSS code")
javascript: str = SchemaField(description="Extracted JavaScript code")
python: str = SchemaField(description="Extracted Python code")
sql: str = SchemaField(description="Extracted SQL code")
java: str = SchemaField(description="Extracted Java code")
cpp: str = SchemaField(description="Extracted C++ code")
csharp: str = SchemaField(description="Extracted C# code")
json_code: str = SchemaField(description="Extracted JSON code")
bash: str = SchemaField(description="Extracted Bash code")
php: str = SchemaField(description="Extracted PHP code")
ruby: str = SchemaField(description="Extracted Ruby code")
yaml: str = SchemaField(description="Extracted YAML code")
markdown: str = SchemaField(description="Extracted Markdown code")
typescript: str = SchemaField(description="Extracted TypeScript code")
xml: str = SchemaField(description="Extracted XML code")
remaining_text: str = SchemaField(
description="Remaining text after code extraction"
)
def __init__(self):
super().__init__(
id="d3a7d896-3b78-4f44-8b4b-48fbf4f0bcd8",
description="Extracts code blocks from text and identifies their programming languages",
categories={BlockCategory.TEXT},
input_schema=CodeExtractionBlock.Input,
output_schema=CodeExtractionBlock.Output,
test_input={
"text": "Here's a Python example:\n```python\nprint('Hello World')\n```\nAnd some HTML:\n```html\n<h1>Title</h1>\n```"
},
test_output=[
("html", "<h1>Title</h1>"),
("python", "print('Hello World')"),
("remaining_text", "Here's a Python example:\nAnd some HTML:"),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# List of supported programming languages with mapped aliases
language_aliases = {
"html": ["html", "htm"],
"css": ["css"],
"javascript": ["javascript", "js"],
"python": ["python", "py"],
"sql": ["sql"],
"java": ["java"],
"cpp": ["cpp", "c++"],
"csharp": ["csharp", "c#", "cs"],
"json_code": ["json"],
"bash": ["bash", "shell", "sh"],
"php": ["php"],
"ruby": ["ruby", "rb"],
"yaml": ["yaml", "yml"],
"markdown": ["markdown", "md"],
"typescript": ["typescript", "ts"],
"xml": ["xml"],
}
# Extract code for each language
for canonical_name, aliases in language_aliases.items():
code = ""
# Try each alias for the language
for alias in aliases:
code_for_alias = self.extract_code(input_data.text, alias)
if code_for_alias:
code = code + "\n\n" + code_for_alias if code else code_for_alias
if code: # Only yield if there's actual code content
yield canonical_name, code
# Remove all code blocks from the text to get remaining text
pattern = (
r"```(?:"
+ "|".join(
re.escape(alias)
for aliases in language_aliases.values()
for alias in aliases
)
+ r")\s+[\s\S]*?```"
)
remaining_text = re.sub(pattern, "", input_data.text).strip()
remaining_text = re.sub(r"\n\s*\n", "\n", remaining_text)
if remaining_text: # Only yield if there's remaining text
yield "remaining_text", remaining_text
def extract_code(self, text: str, language: str) -> str:
# Escape special regex characters in the language string
language = re.escape(language)
# Extract all code blocks enclosed in ```language``` blocks
pattern = re.compile(rf"```{language}\s+(.*?)```", re.DOTALL | re.IGNORECASE)
matches = pattern.finditer(text)
# Combine all code blocks for this language with newlines between them
code_blocks = [match.group(1).strip() for match in matches]
return "\n\n".join(code_blocks) if code_blocks else ""
|
import re
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class CodeExtractionBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="Text containing code blocks to extract (e.g., AI response)",
placeholder="Enter text containing code blocks",
)
class Output(BlockSchema):
html: str = SchemaField(description="Extracted HTML code")
css: str = SchemaField(description="Extracted CSS code")
javascript: str = SchemaField(description="Extracted JavaScript code")
python: str = SchemaField(description="Extracted Python code")
sql: str = SchemaField(description="Extracted SQL code")
java: str = SchemaField(description="Extracted Java code")
cpp: str = SchemaField(description="Extracted C++ code")
csharp: str = SchemaField(description="Extracted C# code")
json_code: str = SchemaField(description="Extracted JSON code")
bash: str = SchemaField(description="Extracted Bash code")
php: str = SchemaField(description="Extracted PHP code")
ruby: str = SchemaField(description="Extracted Ruby code")
yaml: str = SchemaField(description="Extracted YAML code")
markdown: str = SchemaField(description="Extracted Markdown code")
typescript: str = SchemaField(description="Extracted TypeScript code")
xml: str = SchemaField(description="Extracted XML code")
remaining_text: str = SchemaField(
description="Remaining text after code extraction"
)
def __init__(self):
super().__init__(
id="d3a7d896-3b78-4f44-8b4b-48fbf4f0bcd8",
description="Extracts code blocks from text and identifies their programming languages",
categories={BlockCategory.TEXT},
input_schema=CodeExtractionBlock.Input,
output_schema=CodeExtractionBlock.Output,
test_input={
"text": "Here's a Python example:\n```python\nprint('Hello World')\n```\nAnd some HTML:\n```html\n<h1>Title</h1>\n```"
},
test_output=[
("html", "<h1>Title</h1>"),
("python", "print('Hello World')"),
("remaining_text", "Here's a Python example:\nAnd some HTML:"),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
# List of supported programming languages with mapped aliases
language_aliases = {
"html": ["html", "htm"],
"css": ["css"],
"javascript": ["javascript", "js"],
"python": ["python", "py"],
"sql": ["sql"],
"java": ["java"],
"cpp": ["cpp", "c++"],
"csharp": ["csharp", "c#", "cs"],
"json_code": ["json"],
"bash": ["bash", "shell", "sh"],
"php": ["php"],
"ruby": ["ruby", "rb"],
"yaml": ["yaml", "yml"],
"markdown": ["markdown", "md"],
"typescript": ["typescript", "ts"],
"xml": ["xml"],
}
# Extract code for each language
for canonical_name, aliases in language_aliases.items():
code = ""
# Try each alias for the language
for alias in aliases:
code_for_alias = self.extract_code(input_data.text, alias)
if code_for_alias:
code = code + "\n\n" + code_for_alias if code else code_for_alias
if code: # Only yield if there's actual code content
yield canonical_name, code
# Remove all code blocks from the text to get remaining text
pattern = (
r"```(?:"
+ "|".join(
re.escape(alias)
for aliases in language_aliases.values()
for alias in aliases
)
+ r")\s+[\s\S]*?```"
)
remaining_text = re.sub(pattern, "", input_data.text).strip()
remaining_text = re.sub(r"\n\s*\n", "\n", remaining_text)
if remaining_text: # Only yield if there's remaining text
yield "remaining_text", remaining_text
def extract_code(self, text: str, language: str) -> str:
# Escape special regex characters in the language string
language = re.escape(language)
# Extract all code blocks enclosed in ```language``` blocks
pattern = re.compile(rf"```{language}\s+(.*?)```", re.DOTALL | re.IGNORECASE)
matches = pattern.finditer(text)
# Combine all code blocks for this language with newlines between them
code_blocks = [match.group(1).strip() for match in matches]
return "\n\n".join(code_blocks) if code_blocks else ""
|
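A standalone sketch of the fenced-block extraction that `extract_code` above performs, applied to a made-up snippet with plain `re` (no backend imports)::

    import re

    text = "Intro\n```python\nprint('hi')\n```\nOutro"
    pattern = re.compile(r"```python\s+(.*?)```", re.DOTALL | re.IGNORECASE)
    blocks = [m.group(1).strip() for m in pattern.finditer(text)]
    # blocks == ["print('hi')"]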
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class, abstracting the Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:return: the created BaseDeployment
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class, abstracting the Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:return: the created BaseDeployment
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
from datetime import datetime, timedelta, timezone
import jwt
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jwt.exceptions import InvalidTokenError
from passlib.context import CryptContext
from pydantic import BaseModel
# to get a string like this run:
# openssl rand -hex 32
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
fake_users_db = {
"johndoe": {
"username": "johndoe",
"full_name": "John Doe",
"email": "johndoe@example.com",
"hashed_password": "$2b$12$EixZaYVK1fsbw1ZfbX3OXePaWxn96p36WQoeG6Lruj3vjPGga31lW",
"disabled": False,
}
}
class Token(BaseModel):
access_token: str
token_type: str
class TokenData(BaseModel):
username: str | None = None
class User(BaseModel):
username: str
email: str | None = None
full_name: str | None = None
disabled: bool | None = None
class UserInDB(User):
hashed_password: str
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: timedelta | None = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except InvalidTokenError:
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: OAuth2PasswordRequestForm = Depends(),
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(current_user: User = Depends(get_current_active_user)):
return current_user
@app.get("/users/me/items/")
async def read_own_items(current_user: User = Depends(get_current_active_user)):
return [{"item_id": "Foo", "owner": current_user.username}]
|
from datetime import datetime, timedelta, timezone
import jwt
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jwt.exceptions import InvalidTokenError
from passlib.context import CryptContext
from pydantic import BaseModel
# to get a string like this run:
# openssl rand -hex 32
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
fake_users_db = {
"johndoe": {
"username": "johndoe",
"full_name": "John Doe",
"email": "johndoe@example.com",
"hashed_password": "$2b$12$EixZaYVK1fsbw1ZfbX3OXePaWxn96p36WQoeG6Lruj3vjPGga31lW",
"disabled": False,
}
}
class Token(BaseModel):
access_token: str
token_type: str
class TokenData(BaseModel):
username: str | None = None
class User(BaseModel):
username: str
email: str | None = None
full_name: str | None = None
disabled: bool | None = None
class UserInDB(User):
hashed_password: str
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: timedelta | None = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except InvalidTokenError:
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: OAuth2PasswordRequestForm = Depends(),
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(current_user: User = Depends(get_current_active_user)):
return current_user
@app.get("/users/me/items/")
async def read_own_items(current_user: User = Depends(get_current_active_user)):
return [{"item_id": "Foo", "owner": current_user.username}]
|
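A minimal sketch of the JWT round trip used by `create_access_token` and `get_current_user` above, with only PyJWT; the secret and claims are illustrative placeholders::

    from datetime import datetime, timedelta, timezone

    import jwt

    SECRET = "replace-me"
    claims = {"sub": "johndoe", "exp": datetime.now(timezone.utc) + timedelta(minutes=30)}

    token = jwt.encode(claims, SECRET, algorithm="HS256")
    decoded = jwt.decode(token, SECRET, algorithms=["HS256"])   # raises if expired or tampered
    assert decoded["sub"] == "johndoe"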
"""Joint QA Summary graph."""
from typing import List, Optional, Sequence
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.list.base import SummaryIndex
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.ingestion import run_transformations
from llama_index.core.llms.llm import LLM
from llama_index.core.query_engine.router_query_engine import RouterQueryEngine
from llama_index.core.schema import Document, TransformComponent
from llama_index.core.settings import Settings
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.tools.query_engine import QueryEngineTool
DEFAULT_SUMMARY_TEXT = "Use this index for summarization queries"
DEFAULT_QA_TEXT = (
"Use this index for queries that require retrieval of specific "
"context from documents."
)
class QASummaryQueryEngineBuilder:
"""
Joint QA Summary graph builder.
Can build a graph that provides a unified query interface
for both QA and summarization tasks.
NOTE: this is a beta feature. The API may change in the future.
Args:
docstore (BaseDocumentStore): A BaseDocumentStore to use for storing nodes.
summary_text (str): Text to use for the summary index.
qa_text (str): Text to use for the QA index.
node_parser (NodeParser): A NodeParser to use for parsing.
"""
def __init__(
self,
llm: Optional[LLM] = None,
embed_model: Optional[BaseEmbedding] = None,
callback_manager: Optional[CallbackManager] = None,
transformations: Optional[List[TransformComponent]] = None,
storage_context: Optional[StorageContext] = None,
summary_text: str = DEFAULT_SUMMARY_TEXT,
qa_text: str = DEFAULT_QA_TEXT,
) -> None:
"""Init params."""
self._llm = llm or Settings.llm
self._callback_manager = callback_manager or Settings.callback_manager
self._embed_model = embed_model or Settings.embed_model
self._transformations = transformations or Settings.transformations
self._storage_context = storage_context or StorageContext.from_defaults()
self._summary_text = summary_text
self._qa_text = qa_text
def build_from_documents(
self,
documents: Sequence[Document],
) -> RouterQueryEngine:
"""Build query engine."""
# parse nodes
nodes = run_transformations(documents, self._transformations) # type: ignore
# ingest nodes
self._storage_context.docstore.add_documents(nodes, allow_update=True)
# build indices
vector_index = VectorStoreIndex(
nodes=nodes,
transformations=self._transformations,
embed_model=self._embed_model,
storage_context=self._storage_context,
)
summary_index = SummaryIndex(nodes, storage_context=self._storage_context)
vector_query_engine = vector_index.as_query_engine(llm=self._llm)
list_query_engine = summary_index.as_query_engine(
llm=self._llm, response_mode="tree_summarize"
)
# build query engine
return RouterQueryEngine.from_defaults(
llm=self._llm,
query_engine_tools=[
QueryEngineTool.from_defaults(
vector_query_engine, description=self._qa_text
),
QueryEngineTool.from_defaults(
list_query_engine, description=self._summary_text
),
],
select_multi=False,
)
|
"""Joint QA Summary graph."""
from typing import List, Optional, Sequence
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.list.base import SummaryIndex
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.ingestion import run_transformations
from llama_index.core.llms.llm import LLM
from llama_index.core.query_engine.router_query_engine import RouterQueryEngine
from llama_index.core.schema import Document, TransformComponent
from llama_index.core.settings import Settings
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.tools.query_engine import QueryEngineTool
DEFAULT_SUMMARY_TEXT = "Use this index for summarization queries"
DEFAULT_QA_TEXT = (
"Use this index for queries that require retrieval of specific "
"context from documents."
)
class QASummaryQueryEngineBuilder:
"""Joint QA Summary graph builder.
Can build a graph that provides a unified query interface
for both QA and summarization tasks.
NOTE: this is a beta feature. The API may change in the future.
Args:
docstore (BaseDocumentStore): A BaseDocumentStore to use for storing nodes.
summary_text (str): Text to use for the summary index.
qa_text (str): Text to use for the QA index.
node_parser (NodeParser): A NodeParser to use for parsing.
"""
def __init__(
self,
llm: Optional[LLM] = None,
embed_model: Optional[BaseEmbedding] = None,
callback_manager: Optional[CallbackManager] = None,
transformations: Optional[List[TransformComponent]] = None,
storage_context: Optional[StorageContext] = None,
summary_text: str = DEFAULT_SUMMARY_TEXT,
qa_text: str = DEFAULT_QA_TEXT,
) -> None:
"""Init params."""
self._llm = llm or Settings.llm
self._callback_manager = callback_manager or Settings.callback_manager
self._embed_model = embed_model or Settings.embed_model
self._transformations = transformations or Settings.transformations
self._storage_context = storage_context or StorageContext.from_defaults()
self._summary_text = summary_text
self._qa_text = qa_text
def build_from_documents(
self,
documents: Sequence[Document],
) -> RouterQueryEngine:
"""Build query engine."""
# parse nodes
nodes = run_transformations(documents, self._transformations) # type: ignore
# ingest nodes
self._storage_context.docstore.add_documents(nodes, allow_update=True)
# build indices
vector_index = VectorStoreIndex(
nodes=nodes,
transformations=self._transformations,
embed_model=self._embed_model,
storage_context=self._storage_context,
)
summary_index = SummaryIndex(nodes, storage_context=self._storage_context)
vector_query_engine = vector_index.as_query_engine(llm=self._llm)
list_query_engine = summary_index.as_query_engine(
llm=self._llm, response_mode="tree_summarize"
)
# build query engine
return RouterQueryEngine.from_defaults(
llm=self._llm,
query_engine_tools=[
QueryEngineTool.from_defaults(
vector_query_engine, description=self._qa_text
),
QueryEngineTool.from_defaults(
list_query_engine, description=self._summary_text
),
],
select_multi=False,
)
|
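A hedged usage sketch of the builder above; the document text and query are placeholders, and it assumes an LLM and embedding model are already configured via `Settings`::

    from llama_index.core.schema import Document

    builder = QASummaryQueryEngineBuilder()
    query_engine = builder.build_from_documents([Document(text="Example source text ...")])

    # The router sends summarization queries to the summary index and
    # specific questions to the vector index.
    print(query_engine.query("Summarize the document."))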
import logging
import pytest
from backend.util.test import SpinTestServer
# NOTE: You can run tests like with the --log-cli-level=INFO to see the logs
# Set up logging
logger = logging.getLogger(__name__)
# Create console handler with formatting
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
@pytest.fixture(scope="session")
async def server():
async with SpinTestServer() as server:
yield server
@pytest.fixture(scope="session", autouse=True)
async def graph_cleanup(server):
created_graph_ids = []
original_create_graph = server.agent_server.test_create_graph
async def create_graph_wrapper(*args, **kwargs):
created_graph = await original_create_graph(*args, **kwargs)
# Extract user_id correctly
user_id = kwargs.get("user_id", args[2] if len(args) > 2 else None)
created_graph_ids.append((created_graph.id, user_id))
return created_graph
try:
server.agent_server.test_create_graph = create_graph_wrapper
yield # This runs the test function
finally:
server.agent_server.test_create_graph = original_create_graph
# Delete the created graphs and assert they were deleted
for graph_id, user_id in created_graph_ids:
if user_id:
resp = await server.agent_server.test_delete_graph(graph_id, user_id)
num_deleted = resp["version_counts"]
assert num_deleted > 0, f"Graph {graph_id} was not deleted."
|
import pytest
from backend.util.test import SpinTestServer
@pytest.fixture(scope="session")
async def server():
async with SpinTestServer() as server:
yield server
@pytest.fixture(scope="session", autouse=True)
async def graph_cleanup(server):
created_graph_ids = []
original_create_graph = server.agent_server.create_graph
async def create_graph_wrapper(*args, **kwargs):
created_graph = await original_create_graph(*args, **kwargs)
# Extract user_id correctly
user_id = kwargs.get("user_id", args[2] if len(args) > 2 else None)
created_graph_ids.append((created_graph.id, user_id))
return created_graph
try:
server.agent_server.create_graph = create_graph_wrapper
yield # This runs the test function
finally:
server.agent_server.create_graph = original_create_graph
# Delete the created graphs and assert they were deleted
for graph_id, user_id in created_graph_ids:
resp = await server.agent_server.delete_graph(graph_id, user_id)
num_deleted = resp["version_counts"]
assert num_deleted > 0, f"Graph {graph_id} was not deleted."
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> Dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.8'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
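# A quick illustration (not part of the package) of how `digit_version` orders release
# candidates below their final release, which is what the version assertion above relies on:
if __name__ == '__main__':
    assert digit_version('1.4.0rc1') == [1, 4, -1, 1]  # '0rc1' -> int('0') - 1, then the rc number
    assert digit_version('1.4.0rc1') < digit_version('1.4.0') == [1, 4, 0]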
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.8'
mmcv_maximum_version = '1.4.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = sox_ext.get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = sox_ext.load_audio_file(uri, frame_offset, num_frames, normalize, channels_first, format)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
compression: Optional[Union[torchaudio.io.CodecConfig, float, int]] = None,
) -> None:
if not isinstance(compression, (float, int, type(None))):
raise ValueError(
"SoX backend expects non-`None` value for argument `compression` to be of ",
f"type `float` or `int`, but received value of type {type(compression)}",
)
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
sox_ext.save_audio_file(
uri,
src,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
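# A minimal usage sketch (file paths are placeholders; requires torchaudio built with
# the SoX extension and an existing local WAV file):
if __name__ == "__main__":
    metadata = SoXBackend.info("example.wav", format=None)
    waveform, sample_rate = SoXBackend.load("example.wav")
    SoXBackend.save("example_copy.wav", waveform, sample_rate)
    print(metadata, waveform.shape, sample_rate)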
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = sox_ext.get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = sox_ext.load_audio_file(uri, frame_offset, num_frames, normalize, channels_first, format)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
) -> None:
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
sox_ext.save_audio_file(
uri,
src,
sample_rate,
channels_first,
None,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <moto@meta.com>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparation
# -----------
#
import librosa
import matplotlib.pyplot as plt
from torchaudio.utils import download_asset
######################################################################
# In this tutorial, we will use speech data from the
# `VOiCES dataset <https://iqtlabs.github.io/voices/>`__,
# which is licensed under Creative Commons BY 4.0.
SAMPLE_WAV_SPEECH_PATH = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spec(ax, spec, title, ylabel="freq_bin"):
ax.set_title(title)
ax.imshow(librosa.power_to_db(spec), origin="lower", aspect="auto")
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
spec_12 = stretch(spec, overriding_rate=1.2)
spec_09 = stretch(spec, overriding_rate=0.9)
######################################################################
#
def plot():
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
plot_spec(axes[0], torch.abs(spec_12[0]), title="Stretched x1.2")
plot_spec(axes[1], torch.abs(spec[0]), title="Original")
plot_spec(axes[2], torch.abs(spec_09[0]), title="Stretched x0.9")
fig.tight_layout()
plot()
######################################################################
# Time and Frequency Masking
# --------------------------
#
torch.random.manual_seed(4)
time_masking = T.TimeMasking(time_mask_param=80)
freq_masking = T.FrequencyMasking(freq_mask_param=80)
spec = get_spectrogram()
time_masked = time_masking(spec)
freq_masked = freq_masking(spec)
######################################################################
#
def plot():
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
plot_spec(axes[0], spec[0], title="Original")
plot_spec(axes[1], time_masked[0], title="Masked along time axis")
plot_spec(axes[2], freq_masked[0], title="Masked along frequency axis")
fig.tight_layout()
plot()
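######################################################################
# As a rough sketch (not part of the original tutorial), the two masking
# transforms can be chained into a single module and applied to a power
# spectrogram inside a training pipeline:
#
specaugment = torch.nn.Sequential(
    T.FrequencyMasking(freq_mask_param=80),
    T.TimeMasking(time_mask_param=80),
)
augmented = specaugment(get_spectrogram())
print(augmented.shape)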
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <moto@meta.com>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import librosa
import matplotlib.pyplot as plt
from torchaudio.utils import download_asset
SAMPLE_WAV_SPEECH_PATH = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Spectrogram (db)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
plot_spectrogram(torch.abs(spec[0]), title="Original", aspect="equal", xmax=304)
rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
######################################################################
# TimeMasking
# -----------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along time axis")
######################################################################
# FrequencyMasking
# ----------------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along frequency axis")
|
import csv
import logging
import os
from typing import Optional
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
        samples (List[Dict[str, Union[str, List[str]]]]): Must be a list and each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(
self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: Optional[int] = None
):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
"MRR@{}".format(self.at_k),
"NDCG@{}".format(self.at_k),
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
"Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(
num_queries,
np.min(num_positives),
np.mean(num_positives),
np.max(num_positives),
np.min(num_negatives),
np.mean(num_negatives),
np.max(num_negatives),
)
)
logger.info("MRR@{}: {:.2f}".format(self.at_k, mean_mrr * 100))
logger.info("NDCG@{}: {:.2f}".format(self.at_k, mean_ndcg * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
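# A minimal usage sketch; the model checkpoint and toy data below are placeholders,
# not recommendations, and downloading the model requires network access.
if __name__ == "__main__":
    from sentence_transformers import CrossEncoder
    toy_samples = [
        {
            "query": "how to bake bread",
            "positive": ["Mix flour, water and yeast, then bake at 220C."],
            "negative": ["The capital of France is Paris.", "Cats sleep a lot."],
        }
    ]
    evaluator = CERerankingEvaluator(toy_samples, at_k=10, name="toy")
    model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
    print(evaluator(model))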
|
import logging
import numpy as np
import os
import csv
from typing import Optional
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
        samples (List[Dict[str, Union[str, List[str]]]]): Must be a list and each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(
self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: Optional[int] = None
):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
"MRR@{}".format(self.at_k),
"NDCG@{}".format(self.at_k),
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
"Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(
num_queries,
np.min(num_positives),
np.mean(num_positives),
np.max(num_positives),
np.min(num_negatives),
np.mean(num_negatives),
np.max(num_negatives),
)
)
logger.info("MRR@{}: {:.2f}".format(self.at_k, mean_mrr * 100))
logger.info("NDCG@{}: {:.2f}".format(self.at_k, mean_ndcg * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
"""
Outlook local calendar reader for Windows.
Created on Sun Apr 16 12:03:19 2023
@author: tevslin
"""
import datetime
import importlib
import platform
from typing import List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copyright 2023 Evslin Consulting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OutlookLocalCalendarReader(BaseReader):
"""
Outlook local calendar reader for Windows.
Reads events from local copy of Outlook calendar.
"""
def load_data(
self,
number_of_results: Optional[int] = 100,
start_date: Optional[Union[str, datetime.date]] = None,
end_date: Optional[Union[str, datetime.date]] = None,
more_attributes: Optional[List[str]] = None,
) -> List[Document]:
"""
Load data from user's local calendar.
Args:
number_of_results (Optional[int]): the number of events to return. Defaults to 100.
start_date (Optional[Union[str, datetime.date]]): the start date to return events from. Defaults to today.
end_date (Optional[Union[str, datetime.date]]): the last date (inclusive) to return events from. Defaults to 2199-01-01.
            more_attributes (Optional[List[str]]): additional attributes to be retrieved from calendar entries. Non-existent attributes are ignored.
        Returns a list of documents suitable for indexing by llama_index. Always returns Start, End, Subject, Location, and Organizer
attributes and optionally returns additional attributes specified in the more_attributes parameter.
"""
if platform.system().lower() != "windows":
return []
attributes = [
"Start",
"End",
"Subject",
"Location",
"Organizer",
] # base attributes to return
if more_attributes is not None: # if the user has specified more attributes
attributes += more_attributes
if start_date is None:
start_date = datetime.date.today()
elif isinstance(start_date, str):
start_date = datetime.date.fromisoformat(start_date)
# Initialize the Outlook application
winstuff = importlib.import_module("win32com.client")
outlook = winstuff.Dispatch("Outlook.Application").GetNamespace("MAPI")
# Get the Calendar folder
calendar_folder = outlook.GetDefaultFolder(9)
# Retrieve calendar items
events = calendar_folder.Items
if not events:
return []
events.Sort("[Start]") # Sort items by start time
numberReturned = 0
results = []
for event in events:
converted_date = datetime.date(
event.Start.year, event.Start.month, event.Start.day
)
if converted_date > start_date: # if past start date
numberReturned += 1
eventstring = ""
for attribute in attributes:
if hasattr(event, attribute):
eventstring += f"{attribute}: {getattr(event, attribute)}, "
results.append(Document(text=eventstring))
if numberReturned >= number_of_results:
break
return results
if __name__ == "__main__":
reader = OutlookLocalCalendarReader()
print(reader.load_data())
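    # A further hedged example: limit the number of results, start from a fixed date and
    # request extra Outlook item properties (the attribute names below are assumptions;
    # unknown attributes are simply ignored by load_data).
    print(
        reader.load_data(
            number_of_results=10,
            start_date="2023-01-01",
            more_attributes=["Duration", "IsRecurring"],
        )
    )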
|
"""
Outlook local calendar reader for Windows.
Created on Sun Apr 16 12:03:19 2023
@author: tevslin
"""
import datetime
import importlib
import platform
from typing import List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copyright 2023 Evslin Consulting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OutlookLocalCalendarReader(BaseReader):
"""
Outlook local calendar reader for Windows.
Reads events from local copy of Outlook calendar.
"""
def load_data(
self,
number_of_results: Optional[int] = 100,
start_date: Optional[Union[str, datetime.date]] = None,
end_date: Optional[Union[str, datetime.date]] = None,
more_attributes: Optional[List[str]] = None,
) -> List[Document]:
"""
Load data from user's local calendar.
Args:
number_of_results (Optional[int]): the number of events to return. Defaults to 100.
start_date (Optional[Union[str, datetime.date]]): the start date to return events from. Defaults to today.
end_date (Optional[Union[str, datetime.date]]): the last date (inclusive) to return events from. Defaults to 2199-01-01.
            more_attributes (Optional[List[str]]): additional attributes to be retrieved from calendar entries. Non-existent attributes are ignored.
        Returns a list of documents suitable for indexing by llama_index. Always returns Start, End, Subject, Location, and Organizer
attributes and optionally returns additional attributes specified in the more_attributes parameter.
"""
if platform.system().lower() != "windows":
return []
attributes = [
"Start",
"End",
"Subject",
"Location",
"Organizer",
] # base attributes to return
if more_attributes is not None: # if the user has specified more attributes
attributes += more_attributes
if start_date is None:
start_date = datetime.date.today()
elif isinstance(start_date, str):
start_date = datetime.date.fromisoformat(start_date)
# Initialize the Outlook application
winstuff = importlib.import_module("win32com.client")
outlook = winstuff.Dispatch("Outlook.Application").GetNamespace("MAPI")
# Get the Calendar folder
calendar_folder = outlook.GetDefaultFolder(9)
# Retrieve calendar items
events = calendar_folder.Items
if not events:
return []
events.Sort("[Start]") # Sort items by start time
numberReturned = 0
results = []
for event in events:
converted_date = datetime.date(
event.Start.year, event.Start.month, event.Start.day
)
if converted_date > start_date: # if past start date
numberReturned += 1
eventstring = ""
for attribute in attributes:
if hasattr(event, attribute):
eventstring += f"{attribute}: {getattr(event,attribute)}, "
results.append(Document(text=eventstring))
if numberReturned >= number_of_results:
break
return results
if __name__ == "__main__":
reader = OutlookLocalCalendarReader()
print(reader.load_data())
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import (
MetadataMode,
Node,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""
Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
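# A minimal usage sketch (illustrative only): building the tool from an in-memory index.
# `VectorStoreIndex.from_documents` needs an embedding model configured, so treat this
# as a sketch rather than a ready-to-run script.
if __name__ == "__main__":
    from llama_index.core import Document, VectorStoreIndex
    index = VectorStoreIndex.from_documents([Document(text="LlamaIndex is a data framework.")])
    tool = RetrieverTool.from_defaults(
        retriever=index.as_retriever(),
        name="docs_retriever",
        description="Retrieves snippets about LlamaIndex.",
    )
    print(tool.call("What is LlamaIndex?").content)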
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import (
MetadataMode,
Node,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data (see
:ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for an
example of classification based on the attributes in this dataset). It simply shows the
kernel density estimate of observed data points in geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<https://www.iucnredlist.org/species/3038/47437046>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
  Colombia, Ecuador, and Venezuela.
References
----------
- `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
""" # noqa: E501
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_species_distributions
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ["Bradypus Variegatus", "Microryzomys Minutus"]
Xtrain = np.vstack([data["train"]["dd lat"], data["train"]["dd long"]]).T
ytrain = np.array(
[d.decode("ascii").startswith("micro") for d in data["train"]["species"]],
dtype="int",
)
Xtrain *= np.pi / 180.0 # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.0
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(
bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree"
)
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = np.full(land_mask.shape[0], -9999, dtype="int")
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(
projection="cyl",
llcrnrlat=Y.min(),
urcrnrlat=Y.max(),
llcrnrlon=X.min(),
urcrnrlon=X.max(),
resolution="c",
)
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(
X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid"
)
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
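# A small, self-contained illustration (synthetic points, not part of the original
# example) of the convention used above: the haversine metric expects
# [latitude, longitude] in radians, so degrees must be converted before fitting.
toy_latlon_deg = np.array([[-10.0, -60.0], [-10.5, -60.5], [-9.5, -59.5]])
toy_kde = KernelDensity(bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree")
toy_kde.fit(np.radians(toy_latlon_deg))
print(np.exp(toy_kde.score_samples(np.radians([[-10.0, -60.0]]))))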
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<https://www.iucnredlist.org/species/3038/47437046>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
  Colombia, Ecuador, and Venezuela.
References
----------
- `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
""" # noqa: E501
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_species_distributions
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ["Bradypus Variegatus", "Microryzomys Minutus"]
Xtrain = np.vstack([data["train"]["dd lat"], data["train"]["dd long"]]).T
ytrain = np.array(
[d.decode("ascii").startswith("micro") for d in data["train"]["species"]],
dtype="int",
)
Xtrain *= np.pi / 180.0 # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.0
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(
bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree"
)
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = np.full(land_mask.shape[0], -9999, dtype="int")
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(
projection="cyl",
llcrnrlat=Y.min(),
urcrnrlat=Y.max(),
llcrnrlon=X.min(),
urcrnrlon=X.max(),
resolution="c",
)
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(
X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid"
)
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
|
from llama_index.agent.azure_foundry_agent.base import AzureFoundryAgent
__all__ = ["AzureFoundryAgent"]
|
from llama_index.agent.azure_foundry_agent.base import AzureFoundryAgent
__all__ = [
"AzureFoundryAgent"
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class ChannelMapper(BaseModule):
"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Default: None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Default: None.
act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
activation layer in ConvModule. Default: dict(type='ReLU').
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
False. Default: "auto".
        num_outs (int, optional): Number of output feature maps. There will
            be extra convs when num_outs is larger than the length of in_channels.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or dict],
optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
kernel_size: int = 3,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
act_cfg: OptConfigType = dict(type='ReLU'),
bias: Union[bool, str] = 'auto',
num_outs: int = None,
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
bias=bias))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
bias=bias))
def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class ChannelMapper(BaseModule):
"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Default: None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Default: None.
act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
activation layer in ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There will
            be extra convs when num_outs is larger than the length of in_channels.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or dict],
optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
kernel_size: int = 3,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
act_cfg: OptConfigType = dict(type='ReLU'),
num_outs: int = None,
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import PointRendRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
@parameterized.expand(
['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
def test_init(self, cfg_file):
"""Test init Point rend RoI head."""
        # Normal PointRend RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
def test_point_rend_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# Positive rois must not be empty
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
with self.assertRaises(AssertionError):
out = roi_head.loss(feats, proposal_list, batch_data_samples)
@parameterized.expand(
['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
def test_point_rend_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import PointRendRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
@parameterized.expand(
['point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init Point rend RoI head."""
# Normal HTC RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py'])
def test_point_rend_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# Positive rois must not be empty
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
with self.assertRaises(AssertionError):
out = roi_head.loss(feats, proposal_list, batch_data_samples)
@parameterized.expand(
['point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py'])
def test_point_rend_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
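# A hypothetical usage sketch (the work_dir layout is assumed, e.g. checkpoints
# saved as epoch_1.pth ... epoch_12.pth or iter_10000.pth ...):
# >>> ckpt = find_latest_checkpoint('./work_dirs/my_experiment')
# >>> if ckpt is not None:
# ...     cfg.load_from = ckpt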
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update ``cfg.data_root``
    accordingly; otherwise, keep ``cfg.data_root`` unchanged.
    Args:
        cfg (:obj:`Config`): The model config to be modified.
        logger (logging.Logger | str | None): The logger used to print messages.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
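# A hypothetical usage sketch (assumes a config that still defines ``data_root``
# and a ``data`` section, as the function expects):
# >>> os.environ['MMDET_DATASETS'] = '/mnt/datasets/'
# >>> update_data_root(cfg)  # every string under cfg.data that contains the old
# >>>                        # cfg.data_root prefix is rewritten to the new root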
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
import mmcv
from mmcv.utils import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update ``cfg.data_root``
    accordingly; otherwise, keep ``cfg.data_root`` unchanged.
    Args:
        cfg (mmcv.Config): The model config to be modified.
        logger (logging.Logger | str | None): The logger used to print messages.
"""
assert isinstance(cfg, mmcv.Config), \
f'cfg got wrong type: {type(cfg)}, expected mmcv.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, mmcv.Config), \
f'cfg got wrong type: {type(cfg)}, expected mmcv.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, mmcv.ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmengine.model.utils import constant_init
from mmdet.models.layers import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
    # DyReLU should act as a standard (static) ReLU
    # when the effect of the SE-like module is eliminated
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmcv.cnn import constant_init
from mmdet.models.layers import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
    # DyReLU should act as a standard (static) ReLU
    # when the effect of the SE-like module is eliminated
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
|
"""Standard LangChain interface tests"""
import os
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
"""Standard LangChain interface tests"""
import os
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"zip_equal",
]
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
]
|
_base_ = [
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.SparseInst.sparseinst'], allow_failed_imports=False)
model = dict(
type='SparseInst',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
frozen_stages=0,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
encoder=dict(
type='InstanceContextEncoder',
in_channels=[512, 1024, 2048],
out_channels=256),
decoder=dict(
type='BaseIAMDecoder',
in_channels=256 + 2,
num_classes=80,
ins_dim=256,
ins_conv=4,
mask_dim=256,
mask_conv=4,
kernel_dim=128,
scale_factor=2.0,
output_iam=False,
num_masks=100),
criterion=dict(
type='SparseInstCriterion',
num_classes=80,
assigner=dict(type='SparseInstMatcher', alpha=0.8, beta=0.2),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
reduction='sum',
loss_weight=2.0),
loss_obj=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
loss_mask=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=5.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
reduction='sum',
eps=5e-5,
loss_weight=2.0),
),
test_cfg=dict(score_thr=0.005, mask_thr_binary=0.45))
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
backend_args={{_base_.backend_args}},
imdecode_backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(416, 853), (448, 853), (480, 853), (512, 853), (544, 853),
(576, 853), (608, 853), (640, 853)],
keep_ratio=True,
backend=backend),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args={{_base_.backend_args}},
imdecode_backend=backend),
dict(type='Resize', scale=(640, 853), keep_ratio=True, backend=backend),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=8,
sampler=dict(type='InfiniteSampler'),
dataset=dict(pipeline=train_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
val_dataloader = test_dataloader
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(_delete_=True, type='AdamW', lr=0.00005, weight_decay=0.05))
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=270000,
val_interval=10000)
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[210000, 250000],
gamma=0.1)
]
default_hooks = dict(
checkpoint=dict(by_epoch=False, interval=10000, max_keep_ckpts=3))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64, enable=True)
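# A hypothetical launch sketch (the config path is an assumption about where
# this file lives inside the MMDetection repo; adjust it to your checkout):
#   bash tools/dist_train.sh projects/SparseInst/configs/<this_config>.py 8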
|
_base_ = [
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.SparseInst.sparseinst'], allow_failed_imports=False)
model = dict(
type='SparseInst',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
frozen_stages=0,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
encoder=dict(
type='InstanceContextEncoder',
in_channels=[512, 1024, 2048],
out_channels=256),
decoder=dict(
type='BaseIAMDecoder',
in_channels=256 + 2,
num_classes=80,
ins_dim=256,
ins_conv=4,
mask_dim=256,
mask_conv=4,
kernel_dim=128,
scale_factor=2.0,
output_iam=False,
num_masks=100),
criterion=dict(
type='SparseInstCriterion',
num_classes=80,
assigner=dict(type='SparseInstMatcher', alpha=0.8, beta=0.2),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
reduction='sum',
loss_weight=2.0),
loss_obj=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
loss_mask=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=5.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
reduction='sum',
eps=5e-5,
loss_weight=2.0),
),
test_cfg=dict(score_thr=0.005, mask_thr_binary=0.45))
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}},
imdecode_backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(416, 853), (448, 853), (480, 853), (512, 853), (544, 853),
(576, 853), (608, 853), (640, 853)],
keep_ratio=True,
backend=backend),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}},
imdecode_backend=backend),
dict(type='Resize', scale=(640, 853), keep_ratio=True, backend=backend),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=8,
sampler=dict(type='InfiniteSampler'),
dataset=dict(pipeline=train_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
val_dataloader = test_dataloader
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(_delete_=True, type='AdamW', lr=0.00005, weight_decay=0.05))
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=270000,
val_interval=10000)
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[210000, 250000],
gamma=0.1)
]
default_hooks = dict(
checkpoint=dict(by_epoch=False, interval=10000, max_keep_ckpts=3))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64, enable=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler'
]
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.1.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
from contextlib import contextmanager
from typing import TYPE_CHECKING, Callable, Iterator
from llama_index.core.llms.llm import LLM
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.llms.llama_cpp import LlamaCPP
if TYPE_CHECKING:
from lmformatenforcer import CharacterLevelParser
def build_lm_format_enforcer_function(
llm: LLM, character_level_parser: "CharacterLevelParser"
) -> Callable:
"""
Prepare for using the LM format enforcer.
This builds the processing function that will be injected into the LLM to
activate the LM Format Enforcer.
"""
if isinstance(llm, HuggingFaceLLM):
from lmformatenforcer.integrations.transformers import (
build_transformers_prefix_allowed_tokens_fn,
)
return build_transformers_prefix_allowed_tokens_fn(
llm._tokenizer, character_level_parser
)
if isinstance(llm, LlamaCPP):
from llama_cpp import LogitsProcessorList
from lmformatenforcer.integrations.llamacpp import (
build_llamacpp_logits_processor,
)
return LogitsProcessorList(
[build_llamacpp_logits_processor(llm._model, character_level_parser)]
)
raise ValueError("Unsupported LLM type")
@contextmanager
def activate_lm_format_enforcer(
llm: LLM, lm_format_enforcer_fn: Callable
) -> Iterator[None]:
"""
    Activate the LM Format Enforcer for the given LLM.
    Example::
        with activate_lm_format_enforcer(llm, lm_format_enforcer_fn):
            llm.complete(...)
"""
if isinstance(llm, HuggingFaceLLM):
generate_kwargs_key = "prefix_allowed_tokens_fn"
elif isinstance(llm, LlamaCPP):
generate_kwargs_key = "logits_processor"
else:
raise ValueError("Unsupported LLM type")
llm.generate_kwargs[generate_kwargs_key] = lm_format_enforcer_fn
try:
# This is where the user code will run
yield
finally:
# We remove the token enforcer function from the generate_kwargs at the end
# in case other code paths use the same llm object.
del llm.generate_kwargs[generate_kwargs_key]
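# A minimal end-to-end sketch (``MySchema`` is a hypothetical pydantic model;
# JsonSchemaParser is assumed to come from the lmformatenforcer package):
# >>> from lmformatenforcer import JsonSchemaParser
# >>> parser = JsonSchemaParser(MySchema.schema())
# >>> fn = build_lm_format_enforcer_function(llm, parser)
# >>> with activate_lm_format_enforcer(llm, fn):
# ...     response = llm.complete("Answer with JSON matching the schema: ")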
|
from contextlib import contextmanager
from typing import TYPE_CHECKING, Callable, Iterator
from llama_index.core.llms.llm import LLM
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.llms.llama_cpp import LlamaCPP
if TYPE_CHECKING:
from lmformatenforcer import CharacterLevelParser
def build_lm_format_enforcer_function(
llm: LLM, character_level_parser: "CharacterLevelParser"
) -> Callable:
"""Prepare for using the LM format enforcer.
This builds the processing function that will be injected into the LLM to
activate the LM Format Enforcer.
"""
if isinstance(llm, HuggingFaceLLM):
from lmformatenforcer.integrations.transformers import (
build_transformers_prefix_allowed_tokens_fn,
)
return build_transformers_prefix_allowed_tokens_fn(
llm._tokenizer, character_level_parser
)
if isinstance(llm, LlamaCPP):
from llama_cpp import LogitsProcessorList
from lmformatenforcer.integrations.llamacpp import (
build_llamacpp_logits_processor,
)
return LogitsProcessorList(
[build_llamacpp_logits_processor(llm._model, character_level_parser)]
)
raise ValueError("Unsupported LLM type")
@contextmanager
def activate_lm_format_enforcer(
llm: LLM, lm_format_enforcer_fn: Callable
) -> Iterator[None]:
"""Activate the LM Format Enforcer for the given LLM.
with activate_lm_format_enforcer(llm, lm_format_enforcer_fn):
llm.complete(...)
"""
if isinstance(llm, HuggingFaceLLM):
generate_kwargs_key = "prefix_allowed_tokens_fn"
elif isinstance(llm, LlamaCPP):
generate_kwargs_key = "logits_processor"
else:
raise ValueError("Unsupported LLM type")
llm.generate_kwargs[generate_kwargs_key] = lm_format_enforcer_fn
try:
# This is where the user code will run
yield
finally:
# We remove the token enforcer function from the generate_kwargs at the end
# in case other code paths use the same llm object.
del llm.generate_kwargs[generate_kwargs_key]
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Dict, List, Literal, Type, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> Dict[
Literal["invoke", "stream"],
List[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md", "r") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Dict, List, Literal, Type, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> Dict[
Literal["invoke", "stream"],
List[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md", "r") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
"""Util that sends calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import List, Optional, Type
from zoneinfo import ZoneInfo
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT
class SendEventSchema(BaseModel):
"""Input for CreateEvent Tool."""
body: str = Field(
...,
description="The message body to include in the event.",
)
attendees: List[str] = Field(
...,
description="The list of attendees for the event.",
)
subject: str = Field(
...,
description="The subject of the event.",
)
start_datetime: str = Field(
description=" The start datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
end_datetime: str = Field(
description=" The end datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
class O365SendEvent(O365BaseTool):
"""Tool for sending calendar events in Office 365."""
name: str = "send_event"
description: str = (
"Use this tool to create and send an event with the provided event fields."
)
args_schema: Type[SendEventSchema] = SendEventSchema
def _run(
self,
body: str,
attendees: List[str],
subject: str,
start_datetime: str,
end_datetime: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
event = calendar.new_event()
event.body = body
event.subject = subject
try:
event.start = dt.fromisoformat(start_datetime).replace(
tzinfo=ZoneInfo("UTC")
)
except ValueError:
# fallback for backwards compatibility
event.start = dt.strptime(start_datetime, UTC_FORMAT)
try:
event.end = dt.fromisoformat(end_datetime).replace(tzinfo=ZoneInfo("UTC"))
except ValueError:
# fallback for backwards compatibility
event.end = dt.strptime(end_datetime, UTC_FORMAT)
for attendee in attendees:
event.attendees.add(attendee)
event.save()
output = "Event sent: " + str(event)
return output
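# A hypothetical invocation sketch (authentication via the O365 account behind
# O365BaseTool is assumed to be configured already):
# >>> tool = O365SendEvent()
# >>> tool.run({
# ...     "subject": "Weekly sync",
# ...     "body": "Agenda: roadmap review",
# ...     "attendees": ["[email protected]"],
# ...     "start_datetime": "2023-06-09T10:30:00+03:00",
# ...     "end_datetime": "2023-06-09T11:00:00+03:00",
# ... })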
|
"""Util that sends calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import List, Optional, Type
from zoneinfo import ZoneInfo
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT
class SendEventSchema(BaseModel):
"""Input for CreateEvent Tool."""
body: str = Field(
...,
description="The message body to include in the event.",
)
attendees: List[str] = Field(
...,
description="The list of attendees for the event.",
)
subject: str = Field(
...,
description="The subject of the event.",
)
start_datetime: str = Field(
description=" The start datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
end_datetime: str = Field(
description=" The end datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
class O365SendEvent(O365BaseTool): # type: ignore[override, override]
"""Tool for sending calendar events in Office 365."""
name: str = "send_event"
description: str = (
"Use this tool to create and send an event with the provided event fields."
)
args_schema: Type[SendEventSchema] = SendEventSchema
def _run(
self,
body: str,
attendees: List[str],
subject: str,
start_datetime: str,
end_datetime: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
event = calendar.new_event()
event.body = body
event.subject = subject
try:
event.start = dt.fromisoformat(start_datetime).replace(
tzinfo=ZoneInfo("UTC")
)
except ValueError:
# fallback for backwards compatibility
event.start = dt.strptime(start_datetime, UTC_FORMAT)
try:
event.end = dt.fromisoformat(end_datetime).replace(tzinfo=ZoneInfo("UTC"))
except ValueError:
# fallback for backwards compatibility
event.end = dt.strptime(end_datetime, UTC_FORMAT)
for attendee in attendees:
event.attendees.add(attendee)
event.save()
output = "Event sent: " + str(event)
return output
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'DefaultScope'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
import asyncio
import os
from typing import Dict, List
import pytest
import requests
from jina import Flow
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
from tests.k8s_otel.util import get_last_health_check_data, parse_string_jaeger_tags
@pytest.mark.asyncio
@pytest.mark.timeout(1800)
async def test_flow_resource_labeling(
tmpdir, otel_test_namespace: str, k8s_cluster_v2: KindClusterWrapperV2
):
NAMESPACE = 'test-flow-resource-labeling'
dump_path = os.path.join(tmpdir, NAMESPACE)
logger = JinaLogger(NAMESPACE)
# Create k8s flow artifacts
flow = Flow(
name='test-flow-metrics',
port=8080,
metrics=True,
metrics_exporter_host=f'http://otel-collector.{otel_test_namespace}.svc.cluster.local',
metrics_exporter_port=4317,
tracing=True,
traces_exporter_host=f'http://jaeger.{otel_test_namespace}.svc.cluster.local',
traces_exporter_port=4317,
).add(
name='instrumentation',
uses='docker://test-instrumentation:test-pip',
)
flow.to_kubernetes_yaml(dump_path, k8s_namespace=NAMESPACE)
# Deploy flow
k8s_cluster_v2.deploy_from_dir(dir=dump_path, namespace=NAMESPACE)
# Make client requests
with k8s_cluster_v2.port_forward(
'svc/gateway', NAMESPACE, svc_port=8080
) as gateway_port:
from jina import Client
[docs async for docs in Client(port=gateway_port, asyncio=True).post("/")]
# Give grace period for metrics and traces to be exported
await asyncio.sleep(60)
# Check Jaeger API
with k8s_cluster_v2.port_forward(
'svc/jaeger', otel_test_namespace, svc_port=16686
) as jaeger_port:
try:
# Gateway
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='gateway'
)
assert trace_data['processes']['p1']['serviceName'] == 'gateway'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'gateway'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('gateway-')
# Instrumentation Executor
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='instrumentation'
)
assert trace_data['processes']['p1']['serviceName'] == 'instrumentation'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'instrumentation'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('instrumentation-')
except AssertionError as e:
logger.error(trace_data)
raise e
with k8s_cluster_v2.port_forward(
'svc/prometheus', otel_test_namespace, svc_port=9090
) as prometheus_port:
try:
# Check Prometheus Labels
prometheus_labels: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/labels'
).json()['data']
assert 'k8s_deployment_name' in prometheus_labels
assert 'k8s_namespace_name' in prometheus_labels
assert 'k8s_pod_name' in prometheus_labels
except AssertionError as e:
logger.error(prometheus_labels)
raise e
try:
depl_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_deployment_name/values'
).json()['data']
assert 'gateway' in depl_values
assert 'instrumentation' in depl_values
except AssertionError as e:
logger.error(depl_values)
raise e
try:
ns_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_namespace_name/values'
).json()['data']
assert NAMESPACE in ns_values
except AssertionError as e:
logger.error(ns_values)
raise e
try:
pod_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_pod_name/values'
).json()['data']
assert any(i.startswith('gateway-') for i in pod_values)
assert any(i.startswith('instrumentation-') for i in pod_values)
except AssertionError as e:
logger.error(pod_values)
raise e
|
import asyncio
import os
from typing import Dict, List
import pytest
import requests
from jina import Flow
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
from tests.k8s_otel.util import get_last_health_check_data, parse_string_jaeger_tags
@pytest.mark.asyncio
@pytest.mark.timeout(1800)
async def test_flow_resource_labeling(
tmpdir, otel_test_namespace: str, k8s_cluster_v2: KindClusterWrapperV2
):
NAMESPACE = 'test-flow-resource-labeling'
dump_path = os.path.join(tmpdir, NAMESPACE)
logger = JinaLogger(NAMESPACE)
# Create k8s flow artifacts
flow = Flow(
name='test-flow-metrics',
port=8080,
metrics=True,
metrics_exporter_host=f'otel-collector.{otel_test_namespace}.svc.cluster.local',
metrics_exporter_port=4317,
tracing=True,
traces_exporter_host=f'jaeger.{otel_test_namespace}.svc.cluster.local',
traces_exporter_port=4317,
).add(
name='instrumentation',
uses='docker://test-instrumentation:test-pip',
)
flow.to_kubernetes_yaml(dump_path, k8s_namespace=NAMESPACE)
# Deploy flow
k8s_cluster_v2.deploy_from_dir(dir=dump_path, namespace=NAMESPACE)
# Make client requests
with k8s_cluster_v2.port_forward(
'svc/gateway', NAMESPACE, svc_port=8080
) as gateway_port:
from jina import Client
[docs async for docs in Client(port=gateway_port, asyncio=True).post("/")]
# Give grace period for metrics and traces to be exported
await asyncio.sleep(60)
# Check Jaeger API
with k8s_cluster_v2.port_forward(
'svc/jaeger', otel_test_namespace, svc_port=16686
) as jaeger_port:
try:
# Gateway
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='gateway'
)
assert trace_data['processes']['p1']['serviceName'] == 'gateway'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'gateway'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('gateway-')
# Instrumentation Executor
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='instrumentation'
)
assert trace_data['processes']['p1']['serviceName'] == 'instrumentation'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'instrumentation'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('instrumentation-')
except AssertionError as e:
logger.error(trace_data)
raise e
with k8s_cluster_v2.port_forward(
'svc/prometheus', otel_test_namespace, svc_port=9090
) as prometheus_port:
try:
# Check Prometheus Labels
prometheus_labels: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/labels'
).json()['data']
assert 'k8s_deployment_name' in prometheus_labels
assert 'k8s_namespace_name' in prometheus_labels
assert 'k8s_pod_name' in prometheus_labels
except AssertionError as e:
logger.error(prometheus_labels)
raise e
try:
depl_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_deployment_name/values'
).json()['data']
assert 'gateway' in depl_values
assert 'instrumentation' in depl_values
except AssertionError as e:
logger.error(depl_values)
raise e
try:
ns_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_namespace_name/values'
).json()['data']
assert NAMESPACE in ns_values
except AssertionError as e:
logger.error(ns_values)
raise e
try:
pod_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_pod_name/values'
).json()['data']
assert any(i.startswith('gateway-') for i in pod_values)
assert any(i.startswith('instrumentation-') for i in pod_values)
except AssertionError as e:
logger.error(pod_values)
raise e
|
import math
import os
import pytest
import torch
import torchvision
from torchvision.io import _HAS_GPU_VIDEO_DECODER, VideoReader
try:
import av
except ImportError:
av = None
VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos")
@pytest.mark.skipif(_HAS_GPU_VIDEO_DECODER is False, reason="Didn't compile with support for gpu decoder")
class TestVideoGPUDecoder:
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_frame_reading(self, video_file):
torchvision.set_video_backend("cuda")
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path)
with av.open(full_path) as container:
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize("keyframes", [True, False])
@pytest.mark.parametrize(
"full_path, duration",
[
(os.path.join(VIDEO_DIR, x), y)
for x, y in [
("v_SoccerJuggling_g23_c01.avi", 8.0),
("v_SoccerJuggling_g24_c01.avi", 8.0),
("R6llTwEh07w.mp4", 10.0),
("SOX5yA1l24A.mp4", 11.0),
("WUzgd7C1pWA.mp4", 11.0),
]
],
)
def test_seek_reading(self, keyframes, full_path, duration):
torchvision.set_video_backend("cuda")
decoder = VideoReader(full_path)
time = duration / 2
decoder.seek(time, keyframes_only=keyframes)
with av.open(full_path) as container:
container.seek(int(time * 1000000), any_frame=not keyframes, backward=False)
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_metadata(self, video_file):
torchvision.set_video_backend("cuda")
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path)
video_metadata = decoder.get_metadata()["video"]
with av.open(full_path) as container:
video = container.streams.video[0]
av_duration = float(video.duration * video.time_base)
assert math.isclose(video_metadata["duration"], av_duration, rel_tol=1e-2)
assert math.isclose(video_metadata["fps"], video.base_rate, rel_tol=1e-2)
if __name__ == "__main__":
pytest.main([__file__])
|
import math
import os
import pytest
import torch
from torchvision.io import _HAS_GPU_VIDEO_DECODER, VideoReader
try:
import av
except ImportError:
av = None
VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos")
@pytest.mark.skipif(_HAS_GPU_VIDEO_DECODER is False, reason="Didn't compile with support for gpu decoder")
class TestVideoGPUDecoder:
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_frame_reading(self, video_file):
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path, device="cuda")
with av.open(full_path) as container:
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize("keyframes", [True, False])
@pytest.mark.parametrize(
"full_path, duration",
[
(os.path.join(VIDEO_DIR, x), y)
for x, y in [
("v_SoccerJuggling_g23_c01.avi", 8.0),
("v_SoccerJuggling_g24_c01.avi", 8.0),
("R6llTwEh07w.mp4", 10.0),
("SOX5yA1l24A.mp4", 11.0),
("WUzgd7C1pWA.mp4", 11.0),
]
],
)
def test_seek_reading(self, keyframes, full_path, duration):
decoder = VideoReader(full_path, device="cuda")
time = duration / 2
decoder.seek(time, keyframes_only=keyframes)
with av.open(full_path) as container:
container.seek(int(time * 1000000), any_frame=not keyframes, backward=False)
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_metadata(self, video_file):
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path, device="cuda")
video_metadata = decoder.get_metadata()["video"]
with av.open(full_path) as container:
video = container.streams.video[0]
av_duration = float(video.duration * video.time_base)
assert math.isclose(video_metadata["duration"], av_duration, rel_tol=1e-2)
assert math.isclose(video_metadata["fps"], video.base_rate, rel_tol=1e-2)
if __name__ == "__main__":
pytest.main([__file__])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .config import Config, ConfigDict, DictAction
from .get_config_model import get_config, get_model
__all__ = ['Config', 'ConfigDict', 'DictAction', 'get_config', 'get_model']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .config import Config, ConfigDict, DictAction
__all__ = ['Config', 'ConfigDict', 'DictAction']
|
from typing import List
import torch
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
get_sinusoid,
save_wav,
skipIfNoSox,
TempDirMixin,
torch_script,
TorchaudioTestCase,
)
from .common import load_params
class SoxEffectTensorTransform(torch.nn.Module):
effects: List[List[str]]
def __init__(self, effects: List[List[str]], sample_rate: int, channels_first: bool):
super().__init__()
self.effects = effects
self.sample_rate = sample_rate
self.channels_first = channels_first
def forward(self, tensor: torch.Tensor):
return sox_effects.apply_effects_tensor(tensor, self.sample_rate, self.effects, self.channels_first)
class SoxEffectFileTransform(torch.nn.Module):
effects: List[List[str]]
channels_first: bool
def __init__(self, effects: List[List[str]], channels_first: bool):
super().__init__()
self.effects = effects
self.channels_first = channels_first
def forward(self, path: str):
return sox_effects.apply_effects_file(path, self.effects, self.channels_first)
@skipIfNoSox
class TestTorchScript(TempDirMixin, TorchaudioTestCase):
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
effects = args["effects"]
channels_first = True
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
trans = SoxEffectTensorTransform(effects, input_sr, channels_first)
trans = torch_script(trans)
wav = get_sinusoid(
frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32", channels_first=channels_first
)
found, sr_found = trans(wav)
expected, sr_expected = sox_effects.apply_effects_tensor(wav, input_sr, effects, channels_first)
assert sr_found == sr_expected
self.assertEqual(expected, found)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
effects = args["effects"]
channels_first = True
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
trans = SoxEffectFileTransform(effects, channels_first)
trans = torch_script(trans)
path = self.get_temp_path("input.wav")
wav = get_sinusoid(
frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32", channels_first=channels_first
)
save_wav(path, wav, sample_rate=input_sr, channels_first=channels_first)
found, sr_found = trans(path)
expected, sr_expected = sox_effects.apply_effects_file(path, effects, channels_first)
assert sr_found == sr_expected
self.assertEqual(expected, found)
|
from typing import List
import torch
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
skipIfNoSox,
get_sinusoid,
save_wav,
torch_script,
)
from .common import (
load_params,
)
class SoxEffectTensorTransform(torch.nn.Module):
effects: List[List[str]]
def __init__(self, effects: List[List[str]], sample_rate: int, channels_first: bool):
super().__init__()
self.effects = effects
self.sample_rate = sample_rate
self.channels_first = channels_first
def forward(self, tensor: torch.Tensor):
return sox_effects.apply_effects_tensor(tensor, self.sample_rate, self.effects, self.channels_first)
class SoxEffectFileTransform(torch.nn.Module):
effects: List[List[str]]
channels_first: bool
def __init__(self, effects: List[List[str]], channels_first: bool):
super().__init__()
self.effects = effects
self.channels_first = channels_first
def forward(self, path: str):
return sox_effects.apply_effects_file(path, self.effects, self.channels_first)
@skipIfNoSox
class TestTorchScript(TempDirMixin, TorchaudioTestCase):
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
effects = args["effects"]
channels_first = True
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
trans = SoxEffectTensorTransform(effects, input_sr, channels_first)
trans = torch_script(trans)
wav = get_sinusoid(
frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32", channels_first=channels_first
)
found, sr_found = trans(wav)
expected, sr_expected = sox_effects.apply_effects_tensor(wav, input_sr, effects, channels_first)
assert sr_found == sr_expected
self.assertEqual(expected, found)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
effects = args["effects"]
channels_first = True
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
trans = SoxEffectFileTransform(effects, channels_first)
trans = torch_script(trans)
path = self.get_temp_path("input.wav")
wav = get_sinusoid(
frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32", channels_first=channels_first
)
save_wav(path, wav, sample_rate=input_sr, channels_first=channels_first)
found, sr_found = trans(path)
expected, sr_expected = sox_effects.apply_effects_file(path, effects, channels_first)
assert sr_found == sr_expected
self.assertEqual(expected, found)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import ConvUpsample
@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
num_upsample = num_layers if num_layers > 0 else 0
num_layers = num_layers if num_layers > 0 else 1
layer = ConvUpsample(
10,
5,
num_layers=num_layers,
num_upsample=num_upsample,
conv_cfg=None,
norm_cfg=None)
size = 5
x = torch.randn((1, 10, size, size))
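    # each upsample stage in ConvUpsample doubles the spatial size, so the
    # expected output resolution is size * 2**num_upsample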
size = size * pow(2, num_upsample)
x = layer(x)
assert x.shape[-2:] == (size, size)
|
import pytest
import torch
from mmdet.models.utils import ConvUpsample
@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
num_upsample = num_layers if num_layers > 0 else 0
num_layers = num_layers if num_layers > 0 else 1
layer = ConvUpsample(
10,
5,
num_layers=num_layers,
num_upsample=num_upsample,
conv_cfg=None,
norm_cfg=None)
size = 5
x = torch.randn((1, 10, size, size))
size = size * pow(2, num_upsample)
x = layer(x)
assert x.shape[-2:] == (size, size)
|
_base_ = './fast-rcnn_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
_base_ = './fast_rcnn_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
import math
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
stddev = math.sqrt(self.rate / (1.0 - self.rate))
return inputs * backend.random.normal(
shape=ops.shape(inputs),
mean=1.0,
stddev=stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
}
return {**base_config, **config}
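# Illustrative sanity check of the noise scale (a sketch, assuming the layer is
# used via the public `keras.layers.GaussianDropout` export and a NumPy input):
# import numpy as np
# layer = GaussianDropout(rate=0.2)
# x = np.ones((10000, 4), dtype="float32")
# y = layer(x, training=True)
# float(np.std(np.asarray(y)))  # should be close to sqrt(0.2 / 0.8) == 0.5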
|
import math
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
def call(self, inputs, training=False):
if training and self.rate > 0:
stddev = math.sqrt(self.rate / (1.0 - self.rate))
return inputs * backend.random.normal(
shape=ops.shape(inputs),
mean=1.0,
stddev=stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
}
return {**base_config, **config}
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization utils for RoFormer."""
from tokenizers import NormalizedString, PreTokenizedString, normalizers
class JiebaPreTokenizer:
def __init__(self, vocab) -> None:
self.vocab = vocab
self.normalizers = normalizers.BertNormalizer(
clean_text=False,
handle_chinese_chars=True,
strip_accents=False,
lowercase=False,
)
try:
import rjieba
except ImportError:
raise ImportError(
"You need to install rjieba to use RoFormerTokenizer. "
"See https://pypi.org/project/rjieba/ for installation."
)
self.jieba = rjieba
def jieba_split(self, i: int, normalized_string: NormalizedString) -> list[NormalizedString]:
splits = []
        # slicing normalized_string here is slow (~6s), but test_alignment_methods passes
for token, start, end in self.jieba.tokenize(str(normalized_string), hmm=False):
if token in self.vocab:
splits.append(normalized_string[start:end])
else:
token_list = self.normalizers.normalize_str(token).split()
for token in token_list:
if token:
end = start + len(token)
splits.append(normalized_string[start:end])
start = end
        # the alternative below is fast (~300ms), but test_alignment_methods can't pass
# for token in self.jieba.cut(str(normalized_string), False):
# if token in self.vocab:
# splits.append(NormalizedString(token))
# else:
# token_list = self.normalizers.normalize_str(token).split()
# for token in token_list:
# if token:
# splits.append(NormalizedString(token))
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
pretok.split(self.jieba_split)
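# Illustrative wiring sketch (assumes `vocab` is the tokenizer's vocab dict and
# `backend_tokenizer` is a `tokenizers.Tokenizer` instance):
# from tokenizers import pre_tokenizers
# backend_tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab))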
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization utils for RoFormer."""
from typing import List
from tokenizers import NormalizedString, PreTokenizedString, normalizers
class JiebaPreTokenizer:
def __init__(self, vocab) -> None:
self.vocab = vocab
self.normalizers = normalizers.BertNormalizer(
clean_text=False,
handle_chinese_chars=True,
strip_accents=False,
lowercase=False,
)
try:
import rjieba
except ImportError:
raise ImportError(
"You need to install rjieba to use RoFormerTokenizer. "
"See https://pypi.org/project/rjieba/ for installation."
)
self.jieba = rjieba
def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
splits = []
        # slicing normalized_string here is slow (~6s), but test_alignment_methods passes
for token, start, end in self.jieba.tokenize(str(normalized_string), hmm=False):
if token in self.vocab:
splits.append(normalized_string[start:end])
else:
token_list = self.normalizers.normalize_str(token).split()
for token in token_list:
if token:
end = start + len(token)
splits.append(normalized_string[start:end])
start = end
        # the alternative below is fast (~300ms), but test_alignment_methods can't pass
# for token in self.jieba.cut(str(normalized_string), False):
# if token in self.vocab:
# splits.append(NormalizedString(token))
# else:
# token_list = self.normalizers.normalize_str(token).split()
# for token in token_list:
# if token:
# splits.append(NormalizedString(token))
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
pretok.split(self.jieba_split)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel,
convert_sync_batchnorm, is_model_wrapper,
revert_sync_batchnorm)
from mmengine.registry import MODEL_WRAPPERS, Registry
from mmengine.utils import is_installed
class ToyModule(nn.Module):
def __init__(self):
super().__init__()
self.layer1 = nn.Linear(1, 1)
def add_module(self, name, module):
raise ValueError()
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
    # TODO: capsys provided by `pytest` cannot capture the error log produced
    # by MMLogger. Test the error log after refactoring the unit test with
    # `unittest`.
conv = nn.Sequential(ToyModule(), nn.SyncBatchNorm(8))
revert_sync_batchnorm(conv)
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_convert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.BatchNorm2d(8))
x = torch.randn(1, 3, 10, 10)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
# Test convert to mmcv SyncBatchNorm
if is_installed('mmcv'):
# MMCV SyncBatchNorm is only supported on distributed training.
        # torch 1.6 throws an AssertionError, and higher versions throw a
        # RuntimeError
with pytest.raises((RuntimeError, AssertionError)):
convert_sync_batchnorm(conv, implementation='mmcv')
# Test convert BN to Pytorch SyncBatchNorm
# Expect a ValueError prompting that SyncBN is not supported on CPU
converted_conv = convert_sync_batchnorm(conv)
assert isinstance(converted_conv[1], torch.nn.SyncBatchNorm)
with pytest.raises(ValueError):
converted_conv(x)
def test_is_model_wrapper():
# Test basic module wrapper.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29510'
os.environ['RANK'] = str(0)
init_process_group(backend='gloo', rank=0, world_size=1)
model = nn.Linear(1, 1)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` can check model wrapper registered in custom
# registry.
CHILD_REGISTRY = Registry('test_is_model_wrapper', parent=MODEL_WRAPPERS)
class CustomModelWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
CHILD_REGISTRY.register_module(module=CustomModelWrapper, force=True)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel, CustomModelWrapper
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` will not check model wrapper in parent
# registry from a child registry.
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert not is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
wrapper_model = CustomModelWrapper(model)
assert is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
destroy_process_group()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel,
convert_sync_batchnorm, is_model_wrapper,
revert_sync_batchnorm)
from mmengine.registry import MODEL_WRAPPERS, Registry
from mmengine.utils import is_installed
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_convert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.BatchNorm2d(8))
x = torch.randn(1, 3, 10, 10)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
# Test convert to mmcv SyncBatchNorm
if is_installed('mmcv'):
# MMCV SyncBatchNorm is only supported on distributed training.
with pytest.raises((RuntimeError, AssertionError)):
convert_sync_batchnorm(conv, implementation='mmcv')
# Test convert to Pytorch SyncBatchNorm
# Expect a ValueError prompting that SyncBN is not supported on CPU
converted_conv = convert_sync_batchnorm(conv)
assert isinstance(converted_conv[1], torch.nn.SyncBatchNorm)
with pytest.raises(ValueError):
converted_conv(x)
def test_is_model_wrapper():
# Test basic module wrapper.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29510'
os.environ['RANK'] = str(0)
init_process_group(backend='gloo', rank=0, world_size=1)
model = nn.Linear(1, 1)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` can check model wrapper registered in custom
# registry.
CHILD_REGISTRY = Registry('test_is_model_wrapper', parent=MODEL_WRAPPERS)
class CustomModelWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
CHILD_REGISTRY.register_module(module=CustomModelWrapper, force=True)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel, CustomModelWrapper
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` will not check model wrapper in parent
# registry from a child registry.
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert not is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
wrapper_model = CustomModelWrapper(model)
assert is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
destroy_process_group()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_data_element import BaseDataElement
from .instance_data import InstanceData
from .sampler import DefaultSampler, InfiniteSampler
from .utils import pseudo_collate, worker_init_fn
__all__ = [
'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_init_fn',
'pseudo_collate', 'InstanceData'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_data_element import BaseDataElement
from .sampler import DefaultSampler, InfiniteSampler
from .utils import pseudo_collate, worker_init_fn
__all__ = [
'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_init_fn',
'pseudo_collate'
]
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective
from .core import (
Booster,
DataIter,
DMatrix,
ExtMemQuantileDMatrix,
QuantileDMatrix,
_py_version,
build_info,
)
from .tracker import RabitTracker # noqa
from .training import cv, train
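# Optional components: plotting needs matplotlib/graphviz and the sklearn
# wrappers need scikit-learn, so these imports are skipped when those
# dependencies are unavailable.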
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"QuantileDMatrix",
"ExtMemQuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# collective
"collective",
]
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import (
Booster,
DataIter,
DMatrix,
ExtMemQuantileDMatrix,
QuantileDMatrix,
_py_version,
build_info,
)
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"QuantileDMatrix",
"ExtMemQuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import gaussian_blur
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of final training epochs during
            which the data augmentation is closed and L1 loss is switched on.
            Default: 15.
        skip_type_keys (list[str], optional): Sequence of pipeline type
            strings to be skipped. Default: ('Mosaic', 'RandomAffine', 'MixUp')
"""
def __init__(self,
num_last_epochs=15,
skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
def before_train_epoch(self, runner):
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.data_loader
model = runner.model
if is_module_wrapper(model):
model = model.module
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
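# Illustrative config snippet for enabling this hook (the priority value and
# the surrounding hooks are assumptions that depend on the YOLOX config in use):
# custom_hooks = [
#     dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
# ]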
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of final training epochs during
            which the data augmentation is closed and L1 loss is switched on.
            Default: 15.
        skip_type_keys (list[str], optional): Sequence of pipeline type
            strings to be skipped. Default: ('Mosaic', 'RandomAffine', 'MixUp')
"""
def __init__(self,
num_last_epochs=15,
skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
def before_train_epoch(self, runner):
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.data_loader
model = runner.model
if is_module_wrapper(model):
model = model.module
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
runner.logger.info('No mosaic and mixup aug now!')
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
|
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
from langchain.agents.output_parsers.tools import (
ToolAgentAction,
parse_ai_message_to_tool_action,
)
OpenAIToolAgentAction = ToolAgentAction
def parse_ai_message_to_openai_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
tool_actions = parse_ai_message_to_tool_action(message)
if isinstance(tool_actions, AgentFinish):
return tool_actions
final_actions: list[AgentAction] = []
for action in tool_actions:
if isinstance(action, ToolAgentAction):
final_actions.append(
OpenAIToolAgentAction(
tool=action.tool,
tool_input=action.tool_input,
log=action.log,
message_log=action.message_log,
tool_call_id=action.tool_call_id,
)
)
else:
final_actions.append(action)
return final_actions
class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
    It is meant to be used with OpenAI models, as it relies on the specific
tool_calls parameter from OpenAI to convey what tools to use.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_openai_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
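# Illustrative usage sketch (`ai_message` is an assumed AIMessage that already
# carries OpenAI tool_calls):
# parser = OpenAIToolsAgentOutputParser()
# actions = parser.parse_result([ChatGeneration(message=ai_message)])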
|
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
from langchain.agents.output_parsers.tools import (
ToolAgentAction,
parse_ai_message_to_tool_action,
)
OpenAIToolAgentAction = ToolAgentAction
def parse_ai_message_to_openai_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
tool_actions = parse_ai_message_to_tool_action(message)
if isinstance(tool_actions, AgentFinish):
return tool_actions
final_actions: list[AgentAction] = []
for action in tool_actions:
if isinstance(action, ToolAgentAction):
final_actions.append(
OpenAIToolAgentAction(
tool=action.tool,
tool_input=action.tool_input,
log=action.log,
message_log=action.message_log,
tool_call_id=action.tool_call_id,
)
)
else:
final_actions.append(action)
return final_actions
class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
    It is meant to be used with OpenAI models, as it relies on the specific
tool_calls parameter from OpenAI to convey what tools to use.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_openai_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
"""Feature selection algorithms.
These include univariate filter selection methods and the recursive feature elimination
algorithm.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._base import SelectorMixin
from ._from_model import SelectFromModel
from ._mutual_info import mutual_info_classif, mutual_info_regression
from ._rfe import RFE, RFECV
from ._sequential import SequentialFeatureSelector
from ._univariate_selection import (
GenericUnivariateSelect,
SelectFdr,
SelectFpr,
SelectFwe,
SelectKBest,
SelectPercentile,
chi2,
f_classif,
f_oneway,
f_regression,
r_regression,
)
from ._variance_threshold import VarianceThreshold
__all__ = [
"RFE",
"RFECV",
"GenericUnivariateSelect",
"SelectFdr",
"SelectFpr",
"SelectFromModel",
"SelectFwe",
"SelectKBest",
"SelectPercentile",
"SelectorMixin",
"SequentialFeatureSelector",
"VarianceThreshold",
"chi2",
"f_classif",
"f_oneway",
"f_regression",
"mutual_info_classif",
"mutual_info_regression",
"r_regression",
]
|
"""Feature selection algorithms.
These include univariate filter selection methods and the recursive feature elimination
algorithm.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._base import SelectorMixin
from ._from_model import SelectFromModel
from ._mutual_info import mutual_info_classif, mutual_info_regression
from ._rfe import RFE, RFECV
from ._sequential import SequentialFeatureSelector
from ._univariate_selection import (
GenericUnivariateSelect,
SelectFdr,
SelectFpr,
SelectFwe,
SelectKBest,
SelectPercentile,
chi2,
f_classif,
f_oneway,
f_regression,
r_regression,
)
from ._variance_threshold import VarianceThreshold
__all__ = [
"GenericUnivariateSelect",
"SequentialFeatureSelector",
"RFE",
"RFECV",
"SelectFdr",
"SelectFpr",
"SelectFwe",
"SelectKBest",
"SelectFromModel",
"SelectPercentile",
"VarianceThreshold",
"chi2",
"f_classif",
"f_oneway",
"f_regression",
"r_regression",
"mutual_info_classif",
"mutual_info_regression",
"SelectorMixin",
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomRotationTest(testing.TestCase):
@parameterized.named_parameters(
("random_rotate_neg4", -0.4),
("random_rotate_neg2", -0.2),
("random_rotate_4", 0.4),
("random_rotate_2", 0.2),
("random_rotate_tuple", (-0.2, 0.4)),
)
def test_random_rotation_shapes(self, factor):
self.run_layer_test(
layers.RandomRotation,
init_kwargs={
"factor": factor,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_rotation_correctness(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
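        # factor=(0.5, 0.5) pins the rotation to 0.5 * 2 * pi = 180 degrees,
        # so the expected output below is simply the input flipped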
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape)
self.assertAllClose(
backend.convert_to_tensor(expected_output), actual_output, atol=1e-5
)
def test_training_false(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image, training=False)
self.assertAllClose(actual_output, input_image)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape[1:])
output = next(iter(ds)).numpy()
self.assertAllClose(expected_output, output)
|
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomRotationTest(testing.TestCase):
@parameterized.named_parameters(
("random_rotate_neg4", -0.4),
("random_rotate_neg2", -0.2),
("random_rotate_4", 0.4),
("random_rotate_2", 0.2),
("random_rotate_tuple", (-0.2, 0.4)),
)
def test_random_rotation_shapes(self, factor):
self.run_layer_test(
layers.RandomRotation,
init_kwargs={
"factor": factor,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_rotation_correctness(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape)
self.assertAllClose(
backend.convert_to_tensor(expected_output), actual_output, atol=1e-5
)
def test_training_false(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image, training=False)
self.assertAllClose(actual_output, input_image)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape[1:])
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(expected_output, output)
|
"""Test embedding functionalities."""
from collections import defaultdict
from typing import Any, Dict, List
from unittest.mock import patch
import pytest
from llama_index.core.indices.tree.base import TreeIndex
from llama_index.core.indices.tree.select_leaf_embedding_retriever import (
TreeSelectLeafEmbeddingRetriever,
)
from llama_index.core.schema import BaseNode, Document, QueryBundle
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_SUMMARY_PROMPT,
)
@pytest.fixture()
def index_kwargs() -> dict:
"""Index kwargs."""
return {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
}
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
return [Document(text=doc_text)]
def _get_node_text_embedding_similarities(
query_embedding: List[float], nodes: List[BaseNode]
) -> List[float]:
"""Get node text embedding similarity."""
text_similarity_map = defaultdict(lambda: 0.0)
text_similarity_map["Hello world."] = 0.9
text_similarity_map["This is a test."] = 0.8
text_similarity_map["This is another test."] = 0.7
text_similarity_map["This is a test v2."] = 0.6
similarities = []
for node in nodes:
similarities.append(text_similarity_map[node.get_content()])
return similarities
@patch.object(
TreeSelectLeafEmbeddingRetriever,
"_get_query_text_embedding_similarities",
side_effect=_get_node_text_embedding_similarities,
)
def test_embedding_query(
_patch_similarity: Any,
index_kwargs: Dict,
documents: List[Document],
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test embedding query."""
tree = TreeIndex.from_documents(documents, **index_kwargs)
# test embedding query
query_str = "What is?"
retriever = tree.as_retriever(retriever_mode="select_leaf_embedding")
nodes = retriever.retrieve(QueryBundle(query_str))
assert nodes[0].node.get_content() == "Hello world."
|
"""Test embedding functionalities."""
from collections import defaultdict
from typing import Any, Dict, List
from unittest.mock import patch
import pytest
from llama_index.core.indices.tree.base import TreeIndex
from llama_index.core.indices.tree.select_leaf_embedding_retriever import (
TreeSelectLeafEmbeddingRetriever,
)
from llama_index.core.schema import BaseNode, Document, QueryBundle
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_SUMMARY_PROMPT,
)
@pytest.fixture()
def index_kwargs() -> dict:
"""Index kwargs."""
return {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
}
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(text=doc_text)]
def _get_node_text_embedding_similarities(
query_embedding: List[float], nodes: List[BaseNode]
) -> List[float]:
"""Get node text embedding similarity."""
text_similarity_map = defaultdict(lambda: 0.0)
text_similarity_map["Hello world."] = 0.9
text_similarity_map["This is a test."] = 0.8
text_similarity_map["This is another test."] = 0.7
text_similarity_map["This is a test v2."] = 0.6
similarities = []
for node in nodes:
similarities.append(text_similarity_map[node.get_content()])
return similarities
@patch.object(
TreeSelectLeafEmbeddingRetriever,
"_get_query_text_embedding_similarities",
side_effect=_get_node_text_embedding_similarities,
)
def test_embedding_query(
_patch_similarity: Any,
index_kwargs: Dict,
documents: List[Document],
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test embedding query."""
tree = TreeIndex.from_documents(documents, **index_kwargs)
# test embedding query
query_str = "What is?"
retriever = tree.as_retriever(retriever_mode="select_leaf_embedding")
nodes = retriever.retrieve(QueryBundle(query_str))
assert nodes[0].node.get_content() == "Hello world."
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
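    # the distilled vocabulary size differs slightly across model2vec
    # releases, hence the version-dependent expected shape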
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...paddle_image import ImagePaddlehubEncoder
input_dim = 224
target_output_dim = 2048
num_doc = 2
test_data = np.random.rand(num_doc, 3, input_dim, input_dim)
tmp_files = []
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'xception71_imagenet'
def test_imagepaddlehubencoder_encode(test_images: Dict[str, np.array]):
encoder = ImagePaddlehubEncoder(channel_axis=3)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (2048,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
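    # the two banana images are semantically similar, so their embeddings
    # should be closer to each other than to any unrelated image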
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
@pytest.mark.gpu
def test_encode_gpu(test_images: Dict[str, np.array]):
encoder = ImagePaddlehubEncoder(channel_axis=3, device='cuda')
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
assert docs[0].embedding.shape == (2048,)
|
from pathlib import Path
from typing import Dict
import numpy as np
from jina import DocumentArray, Document, Executor
from ...paddle_image import ImagePaddlehubEncoder
input_dim = 224
target_output_dim = 2048
num_doc = 2
test_data = np.random.rand(num_doc, 3, input_dim, input_dim)
tmp_files = []
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'xception71_imagenet'
def test_imagepaddlehubencoder_encode(test_images: Dict[str, np.array]):
encoder = ImagePaddlehubEncoder(channel_axis=3)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (2048,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[common]"',
'lz4': '"docarray[common]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
    return False  # reached when the item has no dtype/ndim (e.g. a plain Python int)
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
    :return: True if running in a Jupyter notebook, else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
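# --- Illustrative usage sketch (added; not part of the module above) ---------
# Shows how the `import_library` helper defined above is meant to be called on
# an optional dependency. Assumes the helper is in scope; 'pandas' is simply
# one of the keys present in INSTALL_INSTRUCTIONS.
if __name__ == '__main__':
    pd = import_library('pandas', raise_error=False)  # returns None when the package is missing
    if pd is None:
        print('pandas is not installed; a plain-Python fallback would be used')
    else:
        print('pandas available:', pd.__version__)
    # With raise_error=True (the default), a missing package raises ImportError
    # carrying the matching `pip install "docarray[pandas]"` hint.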
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[common]"',
'lz4': '"docarray[common]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
    return False  # reached when the item has no dtype/ndim (e.g. a plain Python int)
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
    :return: True if running in a Jupyter notebook, else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
text_bytes = text.encode("utf-8")
return len(text_bytes) / len(zlib.compress(text_bytes))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
def write_txt(transcript: Iterator[dict], file: TextIO):
for segment in transcript:
print(segment['text'].strip(), file=file, flush=True)
def write_vtt(transcript: Iterator[dict], file: TextIO):
print("WEBVTT\n", file=file)
for segment in transcript:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
def write_srt(transcript: Iterator[dict], file: TextIO):
"""
Write a transcript to a file in SRT format.
Example usage:
from pathlib import Path
from whisper.utils import write_srt
result = transcribe(model, audio_path, temperature=temperature, **args)
# save SRT
audio_basename = Path(audio_path).stem
with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
write_srt(result["segments"], file=srt)
"""
for i, segment in enumerate(transcript, start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
|
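# --- Illustrative usage sketch (added; not part of the module above) ---------
# Shows what `format_timestamp` defined above produces, assuming the function
# is in scope. 3661.5 s is 1 hour, 1 minute, 1.5 seconds.
if __name__ == '__main__':
    print(format_timestamp(3661.5))                           # 01:01:01.500
    print(format_timestamp(3661.5, decimal_marker=','))       # 01:01:01,500 (SRT-style)
    print(format_timestamp(5.25, always_include_hours=True))  # 00:00:05.250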
import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
return len(text) / len(zlib.compress(text.encode("utf-8")))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
def write_txt(transcript: Iterator[dict], file: TextIO):
for segment in transcript:
print(segment['text'].strip(), file=file, flush=True)
def write_vtt(transcript: Iterator[dict], file: TextIO):
print("WEBVTT\n", file=file)
for segment in transcript:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
def write_srt(transcript: Iterator[dict], file: TextIO):
"""
Write a transcript to a file in SRT format.
Example usage:
from pathlib import Path
from whisper.utils import write_srt
result = transcribe(model, audio_path, temperature=temperature, **args)
# save SRT
audio_basename = Path(audio_path).stem
with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
write_srt(result["segments"], file=srt)
"""
for i, segment in enumerate(transcript, start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
|
from __future__ import annotations
from typing import Any, List, Optional, cast
from langchain_text_splitters.base import TextSplitter, Tokenizer, split_text_on_tokens
class SentenceTransformersTokenTextSplitter(TextSplitter):
"""Splitting text to tokens using sentence model tokenizer."""
def __init__(
self,
chunk_overlap: int = 50,
model_name: str = "sentence-transformers/all-mpnet-base-v2",
tokens_per_chunk: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs, chunk_overlap=chunk_overlap)
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError(
"Could not import sentence_transformers python package. "
"This is needed in order to for SentenceTransformersTokenTextSplitter. "
"Please install it with `pip install sentence-transformers`."
)
self.model_name = model_name
self._model = SentenceTransformer(self.model_name)
self.tokenizer = self._model.tokenizer
self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)
def _initialize_chunk_configuration(
self, *, tokens_per_chunk: Optional[int]
) -> None:
self.maximum_tokens_per_chunk = self._model.max_seq_length
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}'"
f" is: {self.maximum_tokens_per_chunk}."
f" Argument tokens_per_chunk={self.tokens_per_chunk}"
f" > maximum token limit."
)
def split_text(self, text: str) -> List[str]:
"""Splits the input text into smaller components by splitting text on tokens.
This method encodes the input text using a private `_encode` method, then
strips the start and stop token IDs from the encoded result. It returns the
processed segments as a list of strings.
Args:
text (str): The input text to be split.
Returns:
List[str]: A list of string components derived from the input text after
encoding and processing.
"""
def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
return self._encode(text)[1:-1]
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self.tokens_per_chunk,
decode=self.tokenizer.decode,
encode=encode_strip_start_and_stop_token_ids,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
def count_tokens(self, *, text: str) -> int:
"""Counts the number of tokens in the given text.
This method encodes the input text using a private `_encode` method and
calculates the total number of tokens in the encoded result.
Args:
text (str): The input text for which the token count is calculated.
Returns:
int: The number of tokens in the encoded text.
"""
return len(self._encode(text))
_max_length_equal_32_bit_integer: int = 2**32
def _encode(self, text: str) -> list[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
text,
max_length=self._max_length_equal_32_bit_integer,
truncation="do_not_truncate",
)
return cast("list[int]", token_ids_with_start_and_end_token_ids)
|
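# --- Illustrative usage sketch (added; not part of the module above) ---------
# A minimal sketch of splitting a long string with the token-based splitter
# defined above. Downloads the default sentence-transformers model on first
# use; the exact number of chunks depends on the tokenizer.
if __name__ == '__main__':
    splitter = SentenceTransformersTokenTextSplitter(
        chunk_overlap=10,
        tokens_per_chunk=64,  # must not exceed the model's max_seq_length
    )
    text = 'word ' * 500
    chunks = splitter.split_text(text)
    print(len(chunks), 'chunks;', splitter.count_tokens(text=chunks[0]), 'tokens in the first chunk')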
from __future__ import annotations
from typing import Any, List, Optional, cast
from langchain_text_splitters.base import TextSplitter, Tokenizer, split_text_on_tokens
class SentenceTransformersTokenTextSplitter(TextSplitter):
"""Splitting text to tokens using sentence model tokenizer."""
def __init__(
self,
chunk_overlap: int = 50,
model_name: str = "sentence-transformers/all-mpnet-base-v2",
tokens_per_chunk: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs, chunk_overlap=chunk_overlap)
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError(
"Could not import sentence_transformers python package. "
"This is needed in order to for SentenceTransformersTokenTextSplitter. "
"Please install it with `pip install sentence-transformers`."
)
self.model_name = model_name
self._model = SentenceTransformer(self.model_name)
self.tokenizer = self._model.tokenizer
self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)
def _initialize_chunk_configuration(
self, *, tokens_per_chunk: Optional[int]
) -> None:
self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}'"
f" is: {self.maximum_tokens_per_chunk}."
f" Argument tokens_per_chunk={self.tokens_per_chunk}"
f" > maximum token limit."
)
def split_text(self, text: str) -> List[str]:
"""Splits the input text into smaller components by splitting text on tokens.
This method encodes the input text using a private `_encode` method, then
strips the start and stop token IDs from the encoded result. It returns the
processed segments as a list of strings.
Args:
text (str): The input text to be split.
Returns:
List[str]: A list of string components derived from the input text after
encoding and processing.
"""
def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
return self._encode(text)[1:-1]
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self.tokens_per_chunk,
decode=self.tokenizer.decode,
encode=encode_strip_start_and_stop_token_ids,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
def count_tokens(self, *, text: str) -> int:
"""Counts the number of tokens in the given text.
This method encodes the input text using a private `_encode` method and
calculates the total number of tokens in the encoded result.
Args:
text (str): The input text for which the token count is calculated.
Returns:
int: The number of tokens in the encoded text.
"""
return len(self._encode(text))
_max_length_equal_32_bit_integer: int = 2**32
def _encode(self, text: str) -> List[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
text,
max_length=self._max_length_equal_32_bit_integer,
truncation="do_not_truncate",
)
return token_ids_with_start_and_end_token_ids
|
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Callable, Optional, cast
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata)
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata) # type: ignore[arg-type]
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
|
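# --- Illustrative usage sketch (added; not part of the module above) ---------
# Wires LLMChainExtractor up with a stub LLM and compresses one document.
# Assumes `FakeListLLM` is importable from langchain_core in this version; with
# a real model the extractor keeps only the passage relevant to the query and
# drops documents for which the LLM answers NO_OUTPUT.
if __name__ == '__main__':
    from langchain_core.documents import Document
    from langchain_core.language_models import FakeListLLM  # assumed available

    llm = FakeListLLM(responses=['The sky is blue.'])
    extractor = LLMChainExtractor.from_llm(llm)
    docs = [Document(page_content='The sky is blue. Grass is green.')]
    compressed = extractor.compress_documents(docs, query='What color is the sky?')
    print([d.page_content for d in compressed])  # ['The sky is blue.']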
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from typing import Any, Callable, Dict, Optional, Sequence, cast
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata)
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata) # type: ignore[arg-type]
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite>=0.3.12',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.12',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.12',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.12',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg),
mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
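# --- Illustrative sketch (added; not part of the config above) ---------------
# How the MultiStepLR milestones in the config translate into a per-epoch
# learning-rate multiplier. The base LR itself comes from the optimizer config
# inherited from _base_ and is not shown here.
if __name__ == '__main__':
    gamma, milestones, max_epochs = 0.1, [16, 22], 24
    for epoch in range(max_epochs):
        factor = gamma ** sum(epoch >= m for m in milestones)
        print(f'epoch {epoch:2d}: lr multiplier {factor:g}')
    # epochs 0-15 -> 1.0, epochs 16-21 -> 0.1, epochs 22-23 -> 0.01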
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg),
mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
import asyncio
import logging
import os
import threading
import time
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.warning(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
func_retry = retry(
reraise=False,
stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=1, min=1, max=30),
)
def continuous_retry(*, retry_delay: float = 1.0):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
while True:
try:
return func(*args, **kwargs)
except Exception as exc:
logger.exception(
"%s failed with %s — retrying in %.2f s",
func.__name__,
exc,
retry_delay,
)
time.sleep(retry_delay)
return wrapper
return decorator
|
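# --- Illustrative usage sketch (added; not part of the module above) ---------
# Applies the `conn_retry` decorator defined above to a function that always
# fails; the resource and action names here are made up for the example. The
# decorator retries with exponential backoff and re-raises once max_retry is
# exhausted.
if __name__ == '__main__':
    @conn_retry('Redis', 'Acquire connection', max_retry=2, min_wait=0.1, max_wait=0.2)
    def connect():
        # A real implementation would open the connection here; raising makes
        # the decorator retry before finally re-raising.
        raise ConnectionError('redis unavailable')

    try:
        connect()
    except ConnectionError as e:
        print('gave up after retries:', e)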
import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.warning(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
func_retry = retry(
reraise=False,
stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=1, min=1, max=30),
)
|
from __future__ import annotations
from copy import deepcopy
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture(scope="session")
def _splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def splade_bert_tiny_model(_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _inference_free_splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def inference_free_splade_bert_tiny_model(_inference_free_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_inference_free_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _csr_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model[-1].k = 16
model[-1].k_aux = 32
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def csr_bert_tiny_model(_csr_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_csr_bert_tiny_model)
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Tuple, Union
from mmcv.runner import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptInstanceList, OptMultiConfig,
OptSamplingResultList, SampleList)
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
"""Base class for mask heads used in One-Stage Instance Segmentation."""
def __init__(self, init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
@abstractmethod
def loss(self, **kwargs):
"""Compute losses of the head."""
pass
@abstractmethod
def get_results(self, **kwargs):
"""Transform network outputs of a batch into mask results."""
pass
def forward_train(self,
x: Union[List[Tensor], Tuple[Tensor]],
batch_data_samples: SampleList,
positive_infos: OptSamplingResultList = None,
**kwargs) -> dict:
"""
Args:
x (list[Tensor] | tuple[Tensor]): Features from FPN.
Each has a shape (B, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
            positive_infos (list[:obj:`InstanceData`], optional): Information
of positive samples. Used when the label assignment is
done outside the MaskHead, e.g., in BboxHead in
YOLACT or CondInst, etc. When the label assignment is done in
MaskHead, it would be None, like SOLO. All values
in it should have shape (num_positive_samples, *).
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if positive_infos is None:
outs = self(x)
else:
# TODO: Currently not checked
outs = self(x, positive_infos)
assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \
'even if only one item is returned'
batch_gt_instances = []
batch_gt_instances_ignore = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
# pad the `gt_mask` to keep the same shape as `batch_img_shape`
img_shape = data_sample.metainfo['batch_input_shape']
gt_masks = data_sample.gt_instances.masks.pad(img_shape)
data_sample.gt_instances.masks = gt_masks
batch_gt_instances.append(data_sample.gt_instances)
if 'ignored_instances' in data_sample:
batch_gt_instances_ignore.append(data_sample.ignored_instances)
else:
batch_gt_instances_ignore.append(None)
loss = self.loss(
*outs,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
positive_infos=positive_infos,
batch_gt_instances_ignore=batch_gt_instances_ignore,
**kwargs)
return loss
def simple_test(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rescale: bool = False,
results_list: OptInstanceList = None,
**kwargs) -> InstanceList:
"""Test function without test-time augmentation.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
            results_list (list[obj:`InstanceData`], optional): Detection
results of each image after the post process. Only exist
if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.
Returns:
list[obj:`InstanceData`]: Instance segmentation
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance,)
- labels (Tensor): Has a shape (num_instances,).
- masks (Tensor): Processed mask results, has a
shape (num_instances, h, w).
"""
if results_list is None:
outs = self(x)
else:
outs = self(x, results_list=results_list)
results_list = self.get_results(
*outs,
batch_img_metas=batch_img_metas,
rescale=rescale,
results_list=results_list,
**kwargs)
return results_list
|
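# --- Illustrative sketch (added; not part of the module above) ---------------
# The minimal surface a concrete head must provide on top of BaseMaskHead:
# a forward pass plus the two abstract methods. The layer, shapes, and loss
# below are placeholders for illustration, not a real mask head.
import torch.nn as nn

class TinyMaskHead(BaseMaskHead):  # assumes BaseMaskHead defined above is in scope
    def __init__(self, in_channels=256, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.conv = nn.Conv2d(in_channels, 1, kernel_size=1)

    def forward(self, feats, positive_infos=None):
        # forward_train asserts that forward results are a tuple.
        return (tuple(self.conv(f) for f in feats),)

    def loss(self, mask_preds, batch_gt_instances, batch_img_metas, **kwargs):
        # Placeholder zero loss so forward_train runs end to end.
        return dict(loss_mask=sum(p.sum() * 0 for p in mask_preds))

    def get_results(self, mask_preds, batch_img_metas, rescale=False, **kwargs):
        # Placeholder: return raw per-image predictions without post-processing.
        return list(mask_preds)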
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
"""Base class for mask heads used in One-Stage Instance Segmentation."""
def __init__(self, init_cfg):
super(BaseMaskHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
pass
@abstractmethod
def get_results(self, **kwargs):
"""Get precessed :obj:`InstanceData` of multiple images."""
pass
def forward_train(self,
x,
gt_labels,
gt_masks,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None,
positive_infos=None,
**kwargs):
"""
Args:
x (list[Tensor] | tuple[Tensor]): Features from FPN.
Each has a shape (B, C, H, W).
            gt_labels (list[Tensor]): Ground truth labels of all images,
                each has a shape (num_gts,).
            gt_masks (list[Tensor]): Masks for each bbox, has a shape
                (num_gts, h, w).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (list[Tensor], optional): Ground truth bboxes of
the image, each item has a shape (num_gts, 4).
gt_bboxes_ignore (list[Tensor], optional): Ground truth bboxes
to be ignored, each item has a shape (num_ignored_gts, 4).
positive_infos (list[:obj:`InstanceData`], optional): Information
of positive samples. Used when the label assignment is
done outside the MaskHead, e.g., in BboxHead in
YOLACT or CondInst, etc. When the label assignment is done in
MaskHead, it would be None, like SOLO. All values
in it should have shape (num_positive_samples, *).
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if positive_infos is None:
outs = self(x)
else:
outs = self(x, positive_infos)
assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \
'even if only one item is returned'
loss = self.loss(
*outs,
gt_labels=gt_labels,
gt_masks=gt_masks,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
positive_infos=positive_infos,
**kwargs)
return loss
def simple_test(self,
feats,
img_metas,
rescale=False,
instances_list=None,
**kwargs):
"""Test function without test-time augmentation.
Args:
feats (tuple[torch.Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
instances_list (list[obj:`InstanceData`], optional): Detection
results of each image after the post process. Only exist
if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.
Returns:
list[obj:`InstanceData`]: Instance segmentation \
results of each image after the post process. \
Each item usually contains following keys. \
- scores (Tensor): Classification scores, has a shape
(num_instance,)
- labels (Tensor): Has a shape (num_instances,).
- masks (Tensor): Processed mask results, has a
shape (num_instances, h, w).
"""
if instances_list is None:
outs = self(feats)
else:
outs = self(feats, instances_list=instances_list)
mask_inputs = outs + (img_metas, )
results_list = self.get_results(
*mask_inputs,
rescale=rescale,
instances_list=instances_list,
**kwargs)
return results_list
def onnx_export(self, img, img_metas):
raise NotImplementedError(f'{self.__class__.__name__} does '
f'not support ONNX EXPORT')
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.encode_query.side_effect = mock_encode
model.encode_document.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
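# --- Illustrative sketch (added; not part of the test file above) ------------
# The arithmetic behind the two expected values called out in the comments:
# with the queries/corpus above every relevant document lands in the top 3,
# and query "3" has two relevant documents of which only one can sit at rank 1.
if __name__ == '__main__':
    relevant_counts = [1, 1, 1, 2, 1]  # per query "0".."4"
    hits_in_top3 = [1, 1, 1, 2, 1]     # relevant documents retrieved within the top 3
    precision_at_3 = sum(hits_in_top3) / (len(relevant_counts) * 3)
    recall_at_1 = sum(min(1, r) / r for r in relevant_counts) / len(relevant_counts)
    print(precision_at_3, recall_at_1)  # 0.4 0.9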
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
DEMO_DIR = tm.demo_dir(__file__)
PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python")
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(PYTHON_DEMO_DIR, "update_process.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(PYTHON_DEMO_DIR, "categorical.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
def test_external_memory_demo():
script = os.path.join(PYTHON_DEMO_DIR, "external_memory.py")
cmd = ["python", script]
subprocess.check_call(cmd)
|
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
sys.path.append("tests/python")
import test_demos as td # noqa
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(td.PYTHON_DEMO_DIR, "quantile_data_iterator.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, "update_process.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, "categorical.py")
cmd = ["python", script]
subprocess.check_call(cmd)
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
        in the search path of the dynamic loading mechanism, so that importing
        `_torchaudio` will have the library loader find and load `libtorchaudio`.
        This is the reason why the function should not raise an error when the library
file is not found.
Returns:
bool:
True if the library file is found AND the library loaded without failure.
False if the library file is not found (like in the case where torchaudio
is deployed with pex format, thus the shared library file is
            in a non-standard location).
            If the library file is found but there is an issue loading the library
            (such as a missing dependency), then this function raises the exception as-is.
Raises:
Exception:
If the library file is found, but there is an issue loading the library file,
            (when underlying `ctypes.CDLL` throws an exception), this function will pass
            the exception as-is, instead of catching it and returning bool.
            The expected case is `OSError` thrown by `ctypes.CDLL` when a dynamic dependency
is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
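# --- Illustrative sketch (not part of torchaudio) -----------------------------------
# A minimal, hypothetical helper showing the contract documented in `_load_lib` above:
# it returns False when the shared library file is absent, True on success, and lets
# loader errors (e.g. OSError for a missing dynamic dependency) propagate unchanged.
def _example_load(lib: str = "libtorchaudio") -> None:
    try:
        if not _load_lib(lib):
            # Library file is not bundled next to the package; rely on the system loader.
            warnings.warn(f"{lib} not found in {_LIB_DIR}; falling back to the system search path.")
    except OSError as err:
        # A missing dynamic dependency is not recoverable at runtime; surface it clearly.
        raise RuntimeError(f"Failed to load {lib}; is its native dependency installed?") from err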
_FFMPEG_INITIALIZED = False
def _init_ffmpeg():
global _FFMPEG_INITIALIZED
if _FFMPEG_INITIALIZED:
return
if not torch.ops.torchaudio.is_ffmpeg_available():
raise RuntimeError(
"torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
)
try:
_load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
import torchaudio._torchaudio_ffmpeg # noqa
torch.ops.torchaudio.ffmpeg_init()
_FFMPEG_INITIALIZED = True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
# Because this part is executed as part of `import torchaudio`, we ignore the
# initialization failure.
# If the FFmpeg integration is not properly initialized, then detailed error
# will be raised when client code attempts to import the dedicated feature.
try:
_init_ffmpeg()
except Exception:
pass
_init_extension()
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
in the search path of dynamic loading mechanism, so that importing
        `_torchaudio` will have the library loader find and load `libtorchaudio`.
        This is the reason why the function should not raise an error when the library
file is not found.
Returns:
bool:
True if the library file is found AND the library loaded without failure.
False if the library file is not found (like in the case where torchaudio
is deployed with pex format, thus the shared library file is
            in a non-standard location).
If the library file is found but there is an issue loading the library,
(such as missing dependency) then this function raises the exception as-is.
Raises:
Exception:
If the library file is found, but there is an issue loading the library file,
            (when underlying `ctypes.CDLL` throws an exception), this function will pass
            the exception as-is, instead of catching it and returning bool.
            The expected case is `OSError` thrown by `ctypes.CDLL` when a dynamic dependency
is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
_init_extension()
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
create_dct,
DB_to_amplitude,
detect_pitch_frequency,
edit_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
amplitude_to_DB,
apply_beamforming,
apply_codec,
barkscale_fbanks,
compute_deltas,
compute_kaldi_pitch,
create_dct,
DB_to_amplitude,
detect_pitch_frequency,
edit_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"barkscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
hook = IterTimerHook()
runner = Mock()
hook._before_epoch(runner)
assert isinstance(hook.t, float)
def test_before_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._before_iter(runner, 0)
runner.message_hub.update_scalar.assert_called()
def test_after_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._after_iter(runner, 0)
runner.message_hub.update_scalar.assert_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
hook = IterTimerHook()
runner = Mock()
hook._before_epoch(runner)
assert isinstance(hook.t, float)
def test_before_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._before_iter(runner, 0)
runner.message_hub.update_log.assert_called()
def test_after_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._after_iter(runner, 0)
runner.message_hub.update_log.assert_called()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
from typing import List
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 128.
max_gen_len (int, optional): The maximum length of generated sequences. Defaults to 64.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 4.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts: List[str] = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
# Few shot prompt (providing a few examples before asking model to complete more);
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
# Few shot prompt (providing a few examples before asking model to complete more);
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of [`TensorFlowTensor`][docarray.typing.TensorFlowTensor],
to represent an image tensor. Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to
[`ImageBytes`][docarray.typing.ImageBytes] which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of [`TensorFlowTensor`][docarray.typing.TensorFlowTensor],
to represent an image tensor. Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to
[`ImageBytes`][docarray.typing.ImageBytes] which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from abc import ABC
from docarray.array.storage.weaviate.backend import BackendMixin, WeaviateConfig
from docarray.array.storage.weaviate.find import FindMixin
from docarray.array.storage.weaviate.getsetdel import GetSetDelMixin
from docarray.array.storage.weaviate.seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'WeaviateConfig']
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
|
from abc import ABC
from .backend import BackendMixin, WeaviateConfig
from .find import FindMixin
from .getsetdel import GetSetDelMixin
from .seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'WeaviateConfig']
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
|
"""
Pandas output parser.
DEPRECATED: This class has been moved to `llama-index-experimental`.
"""
from typing import Any
class PandasInstructionParser:
"""
Pandas instruction parser.
DEPRECATED: This class has been moved to `llama-index-experimental`.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"PandasInstructionParser has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine.pandas import PandasInstructionParser`\n"
"Note that the PandasInstructionParser allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
|
"""Pandas output parser.
DEPRECATED: This class has been moved to `llama-index-experimental`.
"""
from typing import Any
class PandasInstructionParser:
"""Pandas instruction parser.
DEPRECATED: This class has been moved to `llama-index-experimental`.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"PandasInstructionParser has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine.pandas import PandasInstructionParser`\n"
"Note that the PandasInstructionParser allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import has_any, is_simple_tensor, query_bounding_boxes, query_spatial_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_spatial_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = query_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, spatial_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, spatial_size=spatial_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(
inpt[params["is_valid"]], format=inpt.format, spatial_size=inpt.spatial_size
),
)
if params["needs_pad"]:
fill = self._fill[type(inpt)]
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
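# --- Illustrative sketch (not part of torchvision) ----------------------------------
# A minimal usage example of the transform above, assuming the prototype datapoints API
# imported at the top of this file. Inputs larger than the target size are cropped and
# smaller ones are padded on the bottom/right, so the output spatial size is fixed.
if __name__ == "__main__":
    crop = FixedSizeCrop(size=(224, 224), fill=0, padding_mode="constant")
    image = datapoints.Image(torch.randint(0, 256, (3, 200, 300), dtype=torch.uint8))
    out = crop(image)
    print(out.shape)  # expected: torch.Size([3, 224, 224])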
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import has_any, is_simple_tensor, query_bounding_box, query_spatial_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBox) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBox is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_spatial_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = query_bounding_box(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, spatial_size = F.crop_bounding_box(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_box(bounding_boxes, format=format, spatial_size=spatial_size)
height_and_width = F.convert_format_bounding_box(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBox):
inpt = datapoints.BoundingBox.wrap_like(
inpt,
F.clamp_bounding_box(inpt[params["is_valid"]], format=inpt.format, spatial_size=inpt.spatial_size),
)
if params["needs_pad"]:
fill = self._fill[type(inpt)]
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
|
import warnings
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.streamlit.streamlit_callback_handler import (
LLMThought,
LLMThoughtLabeler,
LLMThoughtState,
StreamlitCallbackHandler,
ToolRecord,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LLMThoughtState": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"ToolRecord": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"LLMThoughtLabeler": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"LLMThought": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"StreamlitCallbackHandler": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LLMThought",
"LLMThoughtLabeler",
"LLMThoughtState",
"StreamlitCallbackHandler",
"ToolRecord",
]
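# --- Illustrative sketch (not part of langchain) ------------------------------------
# A minimal demonstration of the lazy lookup wired up above: attribute access on this
# module falls through to `__getattr__`, which delegates to the `create_importer`
# callable, emits a deprecation warning, and returns the object re-exported from
# `langchain_community` (assumes the `langchain-community` package is installed).
if __name__ == "__main__":
    handler_cls = _import_attribute("StreamlitCallbackHandler")
    print(handler_cls)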
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.streamlit.streamlit_callback_handler import (
LLMThought,
LLMThoughtLabeler,
LLMThoughtState,
StreamlitCallbackHandler,
ToolRecord,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LLMThoughtState": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"ToolRecord": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"LLMThoughtLabeler": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"LLMThought": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
"StreamlitCallbackHandler": (
"langchain_community.callbacks.streamlit.streamlit_callback_handler"
),
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LLMThoughtState",
"ToolRecord",
"LLMThoughtLabeler",
"LLMThought",
"StreamlitCallbackHandler",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead,
MaskIoUHead)
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
# test dynamic_mask_head
dummy_proposal_feats = torch.rand(num_sampled, 8)
dynamic_mask_head = DynamicMaskHead(
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=8,
feat_channels=8,
out_channels=8,
input_feat_shape=6,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
num_convs=1,
num_classes=8,
in_channels=8,
roi_feat_size=6)
mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats)
mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks,
train_cfg)
loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels)
loss_mask = loss_mask['loss_mask'].sum()
assert loss_mask.item() >= 0
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead,
MaskIoUHead)
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
# test dynamic_mask_head
dummy_proposal_feats = torch.rand(num_sampled, 8)
dynamic_mask_head = DynamicMaskHead(
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=8,
feat_channels=8,
out_channels=8,
input_feat_shape=6,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
num_convs=1,
num_classes=8,
in_channels=8,
roi_feat_size=6)
mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats)
mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks,
train_cfg)
loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels)
loss_mask = loss_mask['loss_mask'].sum()
assert loss_mask.item() >= 0
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 indicating the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 4
output_dir = "output/training_ce_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderCorrelationEvaluator to measure the performance on the dev set
eval_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
scores=eval_dataset["score"],
name="stsb-validation",
)
eval_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-stsb"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=80,
save_strategy="steps",
save_steps=80,
save_total_limit=2,
logging_steps=20,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=eval_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
scores=test_dataset["score"],
name="stsb-test",
)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 indicating the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 4
output_dir = "output/training_ce_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CECorrelationEvaluator to measure the performance on the dev set
eval_evaluator = CECorrelationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
scores=eval_dataset["score"],
name="stsb-validation",
)
eval_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-stsb"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=80,
save_strategy="steps",
save_steps=80,
save_total_limit=2,
logging_steps=20,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=eval_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_evaluator = CECorrelationEvaluator(
sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
scores=test_dataset["score"],
name="stsb-test",
)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
import base64
from typing import Any, Dict, Union, Optional
from vertexai.generative_models._generative_models import SafetySettingsType
from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types
from llama_index.core.llms import ChatMessage, MessageRole
def is_gemini_model(model: str) -> bool:
return model.startswith("gemini")
def create_gemini_client(
model: str, safety_settings: Optional[SafetySettingsType]
) -> Any:
from vertexai.preview.generative_models import GenerativeModel
return GenerativeModel(model_name=model, safety_settings=safety_settings)
def convert_chat_message_to_gemini_content(
message: ChatMessage, is_history: bool = True
) -> Any:
from vertexai.preview.generative_models import Content, Part
def _convert_gemini_part_to_prompt(part: Union[str, Dict]) -> Part:
from vertexai.preview.generative_models import Image, Part
if isinstance(part, str):
return Part.from_text(part)
if not isinstance(part, Dict):
raise ValueError(
f"Message's content is expected to be a dict, got {type(part)}!"
)
if part["type"] == "text":
return Part.from_text(part["text"])
elif part["type"] == "image_url":
path = part["image_url"]
if path.startswith("gs://"):
raise ValueError("Only local image path is supported!")
elif path.startswith("data:image/jpeg;base64,"):
image = Image.from_bytes(base64.b64decode(path[23:]))
else:
image = Image.load_from_file(path)
else:
raise ValueError("Only text and image_url types are supported!")
return Part.from_image(image)
if (
message.content == "" or message.content is None
) and "tool_calls" in message.additional_kwargs:
tool_calls = message.additional_kwargs["tool_calls"]
parts = [
Part._from_gapic(raw_part=gapic_content_types.Part(function_call=tool_call))
for tool_call in tool_calls
]
else:
raw_content = message.content
if raw_content is None:
raw_content = ""
if isinstance(raw_content, str):
raw_content = [raw_content]
parts = [_convert_gemini_part_to_prompt(part) for part in raw_content]
if is_history:
return Content(
role="user" if message.role == MessageRole.USER else "model",
parts=parts,
)
else:
return parts
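# --- Illustrative sketch (not part of llama-index) ----------------------------------
# A minimal example of converting a chat message with the helper above (hypothetical
# usage; requires the `vertexai` SDK). As history, the message becomes a `Content`
# object with a "user"/"model" role; with `is_history=False` only the parts are returned.
if __name__ == "__main__":
    message = ChatMessage(role=MessageRole.USER, content="Describe this image.")
    content = convert_chat_message_to_gemini_content(message, is_history=True)
    parts = convert_chat_message_to_gemini_content(message, is_history=False)
    print(content, parts)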
|
import base64
from typing import Any, Dict, Union, Optional
from vertexai.generative_models._generative_models import SafetySettingsType
from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types
from llama_index.core.llms import ChatMessage, MessageRole
def is_gemini_model(model: str) -> bool:
return model.startswith("gemini")
def create_gemini_client(
model: str, safety_settings: Optional[SafetySettingsType]
) -> Any:
from vertexai.preview.generative_models import GenerativeModel
return GenerativeModel(model_name=model, safety_settings=safety_settings)
def convert_chat_message_to_gemini_content(
message: ChatMessage, is_history: bool = True
) -> Any:
from vertexai.preview.generative_models import Content, Part
def _convert_gemini_part_to_prompt(part: Union[str, Dict]) -> Part:
from vertexai.preview.generative_models import Image, Part
if isinstance(part, str):
return Part.from_text(part)
if not isinstance(part, Dict):
raise ValueError(
f"Message's content is expected to be a dict, got {type(part)}!"
)
if part["type"] == "text":
return Part.from_text(part["text"])
elif part["type"] == "image_url":
path = part["image_url"]
if path.startswith("gs://"):
raise ValueError("Only local image path is supported!")
elif path.startswith("data:image/jpeg;base64,"):
image = Image.from_bytes(base64.b64decode(path[23:]))
else:
image = Image.load_from_file(path)
else:
raise ValueError("Only text and image_url types are supported!")
return Part.from_image(image)
if message.content == "" and "tool_calls" in message.additional_kwargs:
tool_calls = message.additional_kwargs["tool_calls"]
parts = [
Part._from_gapic(raw_part=gapic_content_types.Part(function_call=tool_call))
for tool_call in tool_calls
]
else:
raw_content = message.content
if raw_content is None:
raw_content = ""
if isinstance(raw_content, str):
raw_content = [raw_content]
parts = [_convert_gemini_part_to_prompt(part) for part in raw_content]
if is_history:
return Content(
role="user" if message.role == MessageRole.USER else "model",
parts=parts,
)
else:
return parts
|
"""
OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages.
On the website, you can download parallel datasets for many languages in different formats. I found that
the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal
overhead for post-processing to get it into a suitable format for this library.
You can use the OPUS dataset to create multilingual sentence embeddings. This script contains code to download
OPUS datasets for the desired languages and to create training files in the right format.
1) First, you need to install OpusTools (https://github.com/Helsinki-NLP/OpusTools/tree/master/opustools_pkg):
pip install opustools
2) Once you have OpusTools installed, you can download data in the right format via:
mkdir parallel-sentences
opus_read -d [CORPUS] -s [SRC_LANG] -t [TRG_LANG] --write parallel-sentences/[FILENAME].tsv.gz -wm moses -dl opus -p raw
For example:
mkdir parallel-sentences
opus_read -d JW300 -s en -t de --write parallel-sentences/JW300-en-de.tsv.gz -wm moses -dl opus -p raw
This downloads the JW300 Corpus (http://opus.nlpl.eu/JW300.php) for English (en) and German (de) and writes the output to
parallel-sentences/JW300-en-de.tsv.gz
####################
This python code automates the download and creation of the parallel sentences files.
"""
import os
from opustools import OpusRead
corpora = ["JW300"] # Corpora you want to use
source_languages = ["en"] # Source language, our teacher model is able to understand
target_languages = ["de", "es", "it", "fr", "ar", "tr"] # Target languages, out student model should learn
output_folder = "parallel-sentences"
opus_download_folder = "./opus"
# Iterate over all corpora / source language / target language combinations and download the files
os.makedirs(output_folder, exist_ok=True)
for corpus in corpora:
for src_lang in source_languages:
for trg_lang in target_languages:
output_filename = os.path.join(output_folder, "{}-{}-{}.tsv.gz".format(corpus, src_lang, trg_lang))
if not os.path.exists(output_filename):
print("Create:", output_filename)
try:
read = OpusRead(
directory=corpus,
source=src_lang,
target=trg_lang,
write=[output_filename],
download_dir=opus_download_folder,
preprocess="raw",
write_mode="moses",
suppress_prompts=True,
)
read.printPairs()
except Exception:
print("An error occurred during the creation of", output_filename)
|
"""
OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages.
On the website, you can download parallel datasets for many languages in different formats. I found that
the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal
overhead for post-processing to get it into a suitable format for this library.
You can use the OPUS dataset to create multilingual sentence embeddings. This script contains code to download
OPUS datasets for the desired languages and to create training files in the right format.
1) First, you need to install OpusTools (https://github.com/Helsinki-NLP/OpusTools/tree/master/opustools_pkg):
pip install opustools
2) Once you have OpusTools installed, you can download data in the right format via:
mkdir parallel-sentences
opus_read -d [CORPUS] -s [SRC_LANG] -t [TRG_LANG] --write parallel-sentences/[FILENAME].tsv.gz -wm moses -dl opus -p raw
For example:
mkdir parallel-sentences
opus_read -d JW300 -s en -t de --write parallel-sentences/JW300-en-de.tsv.gz -wm moses -dl opus -p raw
This downloads the JW300 Corpus (http://opus.nlpl.eu/JW300.php) for English (en) and German (de) and writes the output to
parallel-sentences/JW300-en-de.tsv.gz
####################
This Python code automates the download and creation of the parallel sentence files.
"""
from opustools import OpusRead
import os
corpora = ["JW300"] # Corpora you want to use
source_languages = ["en"] # Source language, our teacher model is able to understand
target_languages = ["de", "es", "it", "fr", "ar", "tr"] # Target languages, out student model should learn
output_folder = "parallel-sentences"
opus_download_folder = "./opus"
# Iterate over all corpora / source language / target language combinations and download the files
os.makedirs(output_folder, exist_ok=True)
for corpus in corpora:
for src_lang in source_languages:
for trg_lang in target_languages:
output_filename = os.path.join(output_folder, "{}-{}-{}.tsv.gz".format(corpus, src_lang, trg_lang))
if not os.path.exists(output_filename):
print("Create:", output_filename)
try:
read = OpusRead(
directory=corpus,
source=src_lang,
target=trg_lang,
write=[output_filename],
download_dir=opus_download_folder,
preprocess="raw",
write_mode="moses",
suppress_prompts=True,
)
read.printPairs()
except Exception:
print("An error occurred during the creation of", output_filename)
|
__version__ = '0.30.0a3'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
__version__ = '0.30.0a3'
import logging
from docarray.array import DocArray, DocArrayStacked
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocArray', 'DocArrayStacked']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import gzip
import os
import tarfile
import sentence_transformers
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1).
# For training sentence transformers, it doesn't matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-dev.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-train.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import os
import sentence_transformers
import tarfile
import gzip
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1).
# For training sentence transformers, it doesn't matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-dev.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-train.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress, array_cls):
tmp_file = os.path.join(tmp_path, 'test')
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('to_doc_vec', [True, False])
def test_array_save_load_binary_streaming(
protocol, compress, tmp_path, show_progress, to_doc_vec
):
tmp_file = os.path.join(tmp_path, 'test')
array_cls = DocVec if to_doc_vec else DocList
da = DocList[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
if to_doc_vec:
da = da.to_doc_vec()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da_after = array_cls[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_after):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
assert i == 99
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_save_load_tensor_type(tensor_type, tmp_path):
tmp_file = os.path.join(tmp_path, 'test123')
class MyDoc(BaseDoc):
embedding: tensor_type
text: str
image: ImageDoc
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
da.save_binary(tmp_file)
da2 = DocVec[MyDoc].load_binary(tmp_file, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress, array_cls):
tmp_file = os.path.join(tmp_path, 'test')
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('to_doc_vec', [True, False])
def test_array_save_load_binary_streaming(
protocol, compress, tmp_path, show_progress, to_doc_vec
):
tmp_file = os.path.join(tmp_path, 'test')
array_cls = DocVec if to_doc_vec else DocList
da = DocList[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
if to_doc_vec:
da = da.to_doc_vec()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da_after = array_cls[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_after):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
assert i == 99
|
import pyarrow as pa
import pytest
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.arrow.arrow import Arrow, ArrowConfig
@pytest.fixture
def arrow_file_streaming_format(tmp_path):
filename = tmp_path / "stream.arrow"
testdata = [[1, 1, 1], [0, 100, 6], [1, 90, 900]]
schema = pa.schema([pa.field("input_ids", pa.list_(pa.int32()))])
array = pa.array(testdata, type=pa.list_(pa.int32()))
table = pa.Table.from_arrays([array], schema=schema)
with open(filename, "wb") as f:
with pa.ipc.new_stream(f, schema) as writer:
writer.write_table(table)
return str(filename)
@pytest.fixture
def arrow_file_file_format(tmp_path):
filename = tmp_path / "file.arrow"
testdata = [[1, 1, 1], [0, 100, 6], [1, 90, 900]]
schema = pa.schema([pa.field("input_ids", pa.list_(pa.int32()))])
array = pa.array(testdata, type=pa.list_(pa.int32()))
table = pa.Table.from_arrays([array], schema=schema)
with open(filename, "wb") as f:
with pa.ipc.new_file(f, schema) as writer:
writer.write_table(table)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("arrow_file_streaming_format", {}),
("arrow_file_file_format", {}),
],
)
def test_arrow_generate_tables(file_fixture, config_kwargs, request):
arrow = Arrow(**config_kwargs)
generator = arrow._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
expected = {"input_ids": [[1, 1, 1], [0, 100, 6], [1, 90, 900]]}
assert pa_table.to_pydict() == expected
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = ArrowConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = ArrowConfig(name="name", data_files=data_files)
|
import pytest
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.arrow.arrow import ArrowConfig
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = ArrowConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = ArrowConfig(name="name", data_files=data_files)
|
from __future__ import annotations
from typing import Callable
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
from sentence_transformers.util import fullname, import_from_string
class Dense(Module):
"""
    Feed-forward layer with an activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
config_keys: list[str] = [
"in_features",
"out_features",
"bias",
"activation_function",
]
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
config["activation_function"] = import_from_string(config["activation_function"])()
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
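if __name__ == "__main__":
    # Hedged usage sketch (added by the editor, not part of the original module): pass a batch
    # of sentence embeddings through the layer. The tensor shapes are illustrative only.
    import torch
    dense = Dense(in_features=768, out_features=256)
    features = {"sentence_embedding": torch.rand(4, 768)}
    features = dense(features)
    print(features["sentence_embedding"].shape)  # torch.Size([4, 256])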
|
from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
    Feed-forward layer with an activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...audioclip_image import AudioCLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jinahub.encoder.audioclip_image import AudioCLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.1.0.dev0",
author="Nils Reimers, Tom Aarsen",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.38.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy<2.0.0",
"scikit-learn",
"scipy",
"huggingface-hub>=0.19.3",
"Pillow",
],
extras_require={
"train": [
"datasets",
"accelerate>=0.20.3",
],
"dev": [
"datasets",
"accelerate>=0.20.3",
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.1.0.dev0",
author="Nils Reimers, Tom Aarsen",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"train": [
"datasets",
"accelerate>=0.20.3",
],
"dev": [
"datasets",
"accelerate>=0.20.3",
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
"""Rss reader."""
from typing import List, Any, Union
import logging
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class RssReader(BasePydanticReader):
"""RSS reader.
Reads content from an RSS feed.
"""
is_remote: bool = True
html_to_text: bool = False
user_agent: Union[str, None] = None
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
# https://pythonhosted.org/feedparser/http-useragent.html
self.user_agent = kwargs.get("user_agent", None)
@classmethod
def class_name(cls) -> str:
return "RssReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""Load data from RSS feeds.
Args:
urls (List[str]): List of RSS URLs to load.
Returns:
List[Document]: List of documents.
"""
import feedparser
if self.user_agent:
feedparser.USER_AGENT = self.user_agent
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
parsed = feedparser.parse(url)
for entry in parsed.entries:
doc_id = getattr(entry, "id", None) or getattr(entry, "link", None)
data = entry.get("content", [{}])[0].get(
"value", entry.get("description", entry.get("summary", ""))
)
if self.html_to_text:
import html2text
data = html2text.html2text(data)
extra_info = {
"title": getattr(entry, "title", None),
"link": getattr(entry, "link", None),
"date": getattr(entry, "published", None),
}
if doc_id:
documents.append(
Document(text=data, id_=doc_id, extra_info=extra_info)
)
else:
documents.append(Document(text=data, extra_info=extra_info))
return documents
if __name__ == "__main__":
default_reader = RssReader()
print(
default_reader.load_data(urls=["https://rsshub.app/hackernews/newest"])
) # 0 blocked by cloudflare
reader = RssReader(user_agent="MyApp/1.0 +http://example.com/")
print(reader.load_data(urls=["https://rsshub.app/hackernews/newest"]))
|
"""Rss reader."""
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class RssReader(BasePydanticReader):
"""RSS reader.
Reads content from an RSS feed.
"""
is_remote: bool = True
html_to_text: bool = False
@classmethod
def class_name(cls) -> str:
return "RssReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""Load data from RSS feeds.
Args:
urls (List[str]): List of RSS URLs to load.
Returns:
List[Document]: List of documents.
"""
import feedparser
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
parsed = feedparser.parse(url)
for entry in parsed.entries:
doc_id = getattr(entry, "id", None) or getattr(entry, "link", None)
data = entry.get("content", [{}])[0].get(
"value", entry.get("description", entry.get("summary", ""))
)
if self.html_to_text:
import html2text
data = html2text.html2text(data)
extra_info = {
"title": getattr(entry, "title", None),
"link": getattr(entry, "link", None),
"date": getattr(entry, "published", None),
}
if doc_id:
documents.append(
Document(text=data, id_=doc_id, extra_info=extra_info)
)
else:
documents.append(Document(text=data, extra_info=extra_info))
return documents
|
"""Tool for the SemanticScholar API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.semanticscholar import SemanticScholarAPIWrapper
class SemantscholarInput(BaseModel):
"""Input for the SemanticScholar tool."""
query: str = Field(description="search query to look up")
class SemanticScholarQueryRun(BaseTool):
"""Tool that searches the semanticscholar API."""
name: str = "semanticscholar"
description: str = (
"A wrapper around semantischolar.org "
"Useful for when you need to answer to questions"
"from research papers."
"Input should be a search query."
)
api_wrapper: SemanticScholarAPIWrapper = Field(
default_factory=SemanticScholarAPIWrapper # type: ignore[arg-type]
)
args_schema: Type[BaseModel] = SemantscholarInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Semantic Scholar tool."""
return self.api_wrapper.run(query)
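if __name__ == "__main__":
    # Hedged usage sketch (added by the editor, not part of the original module): instantiate
    # the tool and run a query. This needs the `semanticscholar` package used by the API wrapper
    # and network access; the query string is illustrative only.
    tool = SemanticScholarQueryRun()
    print(tool.run("large language models for program synthesis"))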
|
"""Tool for the SemanticScholar API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.semanticscholar import SemanticScholarAPIWrapper
class SemantscholarInput(BaseModel):
"""Input for the SemanticScholar tool."""
query: str = Field(description="search query to look up")
class SemanticScholarQueryRun(BaseTool): # type: ignore[override, override]
"""Tool that searches the semanticscholar API."""
name: str = "semanticscholar"
description: str = (
"A wrapper around semantischolar.org "
"Useful for when you need to answer to questions"
"from research papers."
"Input should be a search query."
)
api_wrapper: SemanticScholarAPIWrapper = Field(
default_factory=SemanticScholarAPIWrapper # type: ignore[arg-type]
)
args_schema: Type[BaseModel] = SemantscholarInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Semantic Scholar tool."""
return self.api_wrapper.run(query)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import onnxruntime as ort
import torch
import torch.nn as nn
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
class WrapFunction(nn.Module):
"""Wrap the function to be tested for torch.onnx.export tracking."""
def __init__(self, wrapped_function):
super(WrapFunction, self).__init__()
self.wrapped_function = wrapped_function
def forward(self, *args, **kwargs):
return self.wrapped_function(*args, **kwargs)
def ort_validate(model, feats, onnx_io='tmp.onnx'):
"""Validate the output of the onnxruntime backend is the same as the output
generated by torch.
Args:
        model (nn.Module | function): the model or function
            to be verified.
feats (tuple(list(torch.Tensor)) | list(torch.Tensor) | torch.Tensor):
the input of model.
onnx_io (str): the name of onnx output file.
"""
# if model is not an instance of nn.Module, then it is a normal
# function and it should be wrapped.
if isinstance(model, nn.Module):
wrap_model = model
else:
wrap_model = WrapFunction(model)
wrap_model.cpu().eval()
with torch.no_grad():
torch.onnx.export(
wrap_model,
feats,
onnx_io,
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11)
if isinstance(feats, tuple):
ort_feats = []
for feat in feats:
ort_feats += feat
else:
ort_feats = feats
# default model name: tmp.onnx
onnx_outputs = get_ort_model_output(ort_feats)
# remove temp file
if osp.exists(onnx_io):
os.remove(onnx_io)
if isinstance(feats, tuple):
torch_outputs = convert_result_list(wrap_model.forward(*feats))
else:
torch_outputs = convert_result_list(wrap_model.forward(feats))
torch_outputs = [
torch_output.detach().numpy() for torch_output in torch_outputs
]
# match torch_outputs and onnx_outputs
for i in range(len(onnx_outputs)):
np.testing.assert_allclose(
torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05)
def get_ort_model_output(feat, onnx_io='tmp.onnx'):
"""Run the model in onnxruntime env.
Args:
feat (list[Tensor]): A list of tensors from torch.rand,
each is a 4D-tensor.
Returns:
list[np.array]: onnxruntime infer result, each is a np.array
"""
onnx_model = onnx.load(onnx_io)
onnx.checker.check_model(onnx_model)
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_io, session_options)
if isinstance(feat, torch.Tensor):
onnx_outputs = sess.run(None,
{sess.get_inputs()[0].name: feat.numpy()})
else:
onnx_outputs = sess.run(None, {
sess.get_inputs()[i].name: feat[i].numpy()
for i in range(len(feat))
})
return onnx_outputs
def convert_result_list(outputs):
"""Convert the torch forward outputs containing tuple or list to a list
only containing torch.Tensor.
Args:
        outputs (list(Tensor) | tuple(list(Tensor)) | ...): the outputs
            in the torch env, possibly containing nested structures such as list
            or tuple.
Returns:
list(Tensor): a list only containing torch.Tensor
"""
# recursive end condition
if isinstance(outputs, torch.Tensor):
return [outputs]
ret = []
for sub in outputs:
ret += convert_result_list(sub)
return ret
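if __name__ == '__main__':
    # Hedged usage sketch (added by the editor, not part of the original utility): validate a
    # plain torch function against its exported ONNX counterpart. The input shape is
    # illustrative only; onnx and onnxruntime must be installed for this to run.
    dummy_input = torch.rand(1, 3, 8, 8)
    ort_validate(torch.sigmoid, dummy_input)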
|
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import onnxruntime as ort
import torch
import torch.nn as nn
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
class WrapFunction(nn.Module):
"""Wrap the function to be tested for torch.onnx.export tracking."""
def __init__(self, wrapped_function):
super(WrapFunction, self).__init__()
self.wrapped_function = wrapped_function
def forward(self, *args, **kwargs):
return self.wrapped_function(*args, **kwargs)
def ort_validate(model, feats, onnx_io='tmp.onnx'):
"""Validate the output of the onnxruntime backend is the same as the output
generated by torch.
Args:
        model (nn.Module | function): the model or function
            to be verified.
feats (tuple(list(torch.Tensor)) | list(torch.Tensor) | torch.Tensor):
the input of model.
onnx_io (str): the name of onnx output file.
"""
# if model is not an instance of nn.Module, then it is a normal
# function and it should be wrapped.
if isinstance(model, nn.Module):
wrap_model = model
else:
wrap_model = WrapFunction(model)
wrap_model.cpu().eval()
with torch.no_grad():
torch.onnx.export(
wrap_model,
feats,
onnx_io,
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11)
if isinstance(feats, tuple):
ort_feats = []
for feat in feats:
ort_feats += feat
else:
ort_feats = feats
# default model name: tmp.onnx
onnx_outputs = get_ort_model_output(ort_feats)
# remove temp file
if osp.exists(onnx_io):
os.remove(onnx_io)
if isinstance(feats, tuple):
torch_outputs = convert_result_list(wrap_model.forward(*feats))
else:
torch_outputs = convert_result_list(wrap_model.forward(feats))
torch_outputs = [
torch_output.detach().numpy() for torch_output in torch_outputs
]
# match torch_outputs and onnx_outputs
for i in range(len(onnx_outputs)):
np.testing.assert_allclose(
torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05)
def get_ort_model_output(feat, onnx_io='tmp.onnx'):
"""Run the model in onnxruntime env.
Args:
feat (list[Tensor]): A list of tensors from torch.rand,
each is a 4D-tensor.
Returns:
list[np.array]: onnxruntime infer result, each is a np.array
"""
onnx_model = onnx.load(onnx_io)
onnx.checker.check_model(onnx_model)
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_io, session_options)
if isinstance(feat, torch.Tensor):
onnx_outputs = sess.run(None,
{sess.get_inputs()[0].name: feat.numpy()})
else:
onnx_outputs = sess.run(None, {
sess.get_inputs()[i].name: feat[i].numpy()
for i in range(len(feat))
})
return onnx_outputs
def convert_result_list(outputs):
"""Convert the torch forward outputs containing tuple or list to a list
only containing torch.Tensor.
Args:
        outputs (list(Tensor) | tuple(list(Tensor)) | ...): the outputs
            in the torch env, possibly containing nested structures such as list
            or tuple.
Returns:
list(Tensor): a list only containing torch.Tensor
"""
# recursive end condition
if isinstance(outputs, torch.Tensor):
return [outputs]
ret = []
for sub in outputs:
ret += convert_result_list(sub)
return ret
|
import os
from pathlib import Path
from jina import Executor
def test_config():
ex = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'query_features': ['query'],
'match_features': ['match'],
'relevance_label': 'rel',
},
)
assert ex.q_features == ['query']
def test_init(ranker):
assert not ranker.model.is_fitted()
def test_train(ranker, documents_to_train_stub_model):
ranker.train(docs=documents_to_train_stub_model)
assert ranker.model.is_fitted()
def test_train_with_weights(ranker_with_weight, documents_to_train_stub_model):
"""Weight field specify the importance of the features."""
ranker_with_weight.train(docs=documents_to_train_stub_model)
assert ranker_with_weight.model.is_fitted()
def test_dump_load(ranker, documents_to_train_stub_model, tmpdir):
model_path = str(tmpdir) + '/model.cbm'
ranker.train(docs=documents_to_train_stub_model)
assert ranker.model.is_fitted()
ranker.dump(parameters={'model_path': model_path})
print(model_path)
assert os.path.exists(model_path)
ranker.load({'model_path': model_path})
assert ranker.model.is_fitted()
def test_rank(
ranker, documents_to_train_stub_model, documents_without_label_random_price
):
ranker.train(docs=documents_to_train_stub_model)
assert ranker.model.is_fitted()
matches_before_rank = documents_without_label_random_price.traverse_flat(['m'])
for match in matches_before_rank:
assert not match.scores.get('relevance').value
ranker.rank(documents_without_label_random_price)
matches_after_rank = documents_without_label_random_price.traverse_flat(['m'])
for match in matches_after_rank:
assert isinstance(match.scores.get('relevance').value, float)
def test_rank_price_sensitive_model(
ranker,
documents_to_train_price_sensitive_model,
documents_without_label_random_brand,
):
"""train the model using price sensitive data, assure higher price get lower relevance score."""
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.model.is_fitted()
ranker.rank(documents_without_label_random_brand)
for doc in documents_without_label_random_brand:
predicted_relevances = []
predicted_ids = []
        expected_ids = ['3', '4', '2', '1']  # Prices from smallest to largest.
for match in doc.matches:
predicted_relevances.append(match.scores.get('relevance').value)
predicted_ids.append(match.id)
assert (
predicted_relevances[0]
>= predicted_relevances[1]
>= predicted_relevances[2]
)
assert predicted_ids == expected_ids
|
import os
def test_init(ranker):
assert not ranker.model.is_fitted()
def test_train(ranker, documents_to_train_stub_model):
ranker.train(docs=documents_to_train_stub_model)
assert ranker.model.is_fitted()
def test_train_with_weights(ranker_with_weight, documents_to_train_stub_model):
"""Weight field specify the importance of the features."""
ranker_with_weight.train(docs=documents_to_train_stub_model)
assert ranker_with_weight.model.is_fitted()
def test_dump_load(ranker, documents_to_train_stub_model, tmpdir):
model_path = str(tmpdir) + '/model.cbm'
ranker.train(docs=documents_to_train_stub_model)
assert ranker.model.is_fitted()
ranker.dump(parameters={'model_path': model_path})
print(model_path)
assert os.path.exists(model_path)
ranker.load({'model_path': model_path})
assert ranker.model.is_fitted()
def test_rank(
ranker, documents_to_train_stub_model, documents_without_label_random_price
):
ranker.train(docs=documents_to_train_stub_model)
assert ranker.model.is_fitted()
matches_before_rank = documents_without_label_random_price.traverse_flat(['m'])
for match in matches_before_rank:
assert not match.scores.get('relevance').value
ranker.rank(documents_without_label_random_price)
matches_after_rank = documents_without_label_random_price.traverse_flat(['m'])
for match in matches_after_rank:
assert isinstance(match.scores.get('relevance').value, float)
def test_rank_price_sensitive_model(
ranker,
documents_to_train_price_sensitive_model,
documents_without_label_random_brand,
):
"""train the model using price sensitive data, assure higher price get lower relevance score."""
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.model.is_fitted()
ranker.rank(documents_without_label_random_brand)
for doc in documents_without_label_random_brand:
predicted_relevances = []
predicted_ids = []
        expected_ids = ['3', '4', '2', '1']  # Prices from smallest to largest.
for match in doc.matches:
predicted_relevances.append(match.scores.get('relevance').value)
predicted_ids.append(match.id)
assert (
predicted_relevances[0]
>= predicted_relevances[1]
>= predicted_relevances[2]
)
assert predicted_ids == expected_ids
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = [
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",
]
_dynamic_imports = {
"chain": "base",
"Runnable": "base",
"RunnableBinding": "base",
"RunnableGenerator": "base",
"RunnableLambda": "base",
"RunnableMap": "base",
"RunnableParallel": "base",
"RunnableSequence": "base",
"RunnableSerializable": "base",
"RunnableBranch": "branch",
"RunnableConfig": "config",
"ensure_config": "config",
"get_config_list": "config",
"patch_config": "config",
"run_in_executor": "config",
"RunnableWithFallbacks": "fallbacks",
"RunnableWithMessageHistory": "history",
"RunnableAssign": "passthrough",
"RunnablePassthrough": "passthrough",
"RunnablePick": "passthrough",
"RouterInput": "router",
"RouterRunnable": "router",
"AddableDict": "utils",
"ConfigurableField": "utils",
"ConfigurableFieldMultiOption": "utils",
"ConfigurableFieldSingleOption": "utils",
"ConfigurableFieldSpec": "utils",
"aadd": "utils",
"add": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
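if __name__ == "__main__":
    # Hedged usage sketch (added by the editor, not part of the original module): a minimal LCEL
    # pipeline built from the primitives re-exported here. RunnableLambda wraps plain functions
    # and the | operator composes them into a RunnableSequence that supports invoke/batch/stream.
    from langchain_core.runnables import RunnableLambda
    add_one = RunnableLambda(lambda x: x + 1)
    double = RunnableLambda(lambda x: x * 2)
    pipeline = add_one | double
    print(pipeline.invoke(3))  # 8
    print(pipeline.batch([1, 2]))  # [4, 6]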
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = [
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",
]
_dynamic_imports = {
"chain": "base",
"Runnable": "base",
"RunnableBinding": "base",
"RunnableGenerator": "base",
"RunnableLambda": "base",
"RunnableMap": "base",
"RunnableParallel": "base",
"RunnableSequence": "base",
"RunnableSerializable": "base",
"RunnableBranch": "branch",
"RunnableConfig": "config",
"ensure_config": "config",
"get_config_list": "config",
"patch_config": "config",
"run_in_executor": "config",
"RunnableWithFallbacks": "fallbacks",
"RunnableWithMessageHistory": "history",
"RunnableAssign": "passthrough",
"RunnablePassthrough": "passthrough",
"RunnablePick": "passthrough",
"RouterInput": "router",
"RouterRunnable": "router",
"AddableDict": "utils",
"ConfigurableField": "utils",
"ConfigurableFieldMultiOption": "utils",
"ConfigurableFieldSingleOption": "utils",
"ConfigurableFieldSpec": "utils",
"aadd": "utils",
"add": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
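if __name__ == '__main__':
    # Hedged usage sketch (added by the editor, not part of the original module): validate a raw
    # numpy array as a VideoNdArray via pydantic. The (frames, height, width, channels) shape is
    # illustrative only; the last dimension is expected to hold the 3 color channels.
    from pydantic import parse_obj_as
    video = parse_obj_as(VideoNdArray, np.zeros((10, 32, 32, 3)))
    print(type(video), video.shape)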
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
"""
_PROTO_FIELD_NAME = 'video_ndarray'
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.data_elements import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestSingleStageInstanceSegmentor(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand([
'solo/solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_light_r50_fpn_3x_coco.py',
'solov2/solov2_r50_fpn_1x_coco.py',
'solov2/solov2_light_r18_fpn_mstrain_3x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.mask_head)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_light_r18_fpn_mstrain_3x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_light_r18_fpn_mstrain_3x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18 (smaller model for faster tests)
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestSingleStageInstanceSegmentor(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand([
'solo/solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_light_r50_fpn_3x_coco.py',
'solov2/solov2_r50_fpn_1x_coco.py',
'solov2/solov2_light_r18_fpn_mstrain_3x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.mask_head)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_light_r18_fpn_mstrain_3x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18 (smaller model for faster tests)
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_light_r18_fpn_mstrain_3x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18 (smaller model for faster tests)
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on="/index", inputs=data_generator(), request_size=request_size, return_results=True
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
def filter_none(elements):
return list(filter(lambda e: e is not None, elements))
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
assert len(
filter_none(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes("embedding")
)
) == count
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on="/test", inputs=docs, parameters={"traversal_paths": [traversal_path]}, return_results=True
)
validate_traversal(docs_per_path)(resp)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on="/index", inputs=data_generator(), request_size=request_size, return_results=True
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
            # Check every expected traversal path, not only the first one.
            return all(
                len(
                    DocumentArray(res[0].docs)
                    .traverse_flat([path])
                    .get_attributes("embedding")
                )
                == count
                for path, count in expected_docs_per_path
            )
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on="/test", inputs=docs, parameters={"traversal_paths": [traversal_path]}, return_results=True
)
assert validate_traversal(docs_per_path)(resp)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class FastStopTrainingHook(Hook):
"""Set runner's epoch information to the model."""
def __init__(self, by_epoch, save_ckpt=False, stop_iter_or_epoch=5):
self.by_epoch = by_epoch
self.save_ckpt = save_ckpt
self.stop_iter_or_epoch = stop_iter_or_epoch
def after_train_iter(self, runner, batch_idx: int, data_batch: None,
outputs: None) -> None:
if self.save_ckpt and self.by_epoch:
            # When stopping is epoch-based and a checkpoint should be saved,
            # at least one full epoch must finish before stopping.
return
if runner.iter >= self.stop_iter_or_epoch:
raise RuntimeError('quick exit')
def after_train_epoch(self, runner) -> None:
if runner.epoch >= self.stop_iter_or_epoch - 1:
raise RuntimeError('quick exit')
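# Hedged usage sketch: in an MMEngine-style config this hook would typically be
# enabled through `custom_hooks`; the argument values below are illustrative
# assumptions, not taken from a real config.
#
#   custom_hooks = [
#       dict(type='FastStopTrainingHook', by_epoch=False,
#            save_ckpt=False, stop_iter_or_epoch=5)
#   ]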
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class FastStopTrainingHook(Hook):
"""Set runner's epoch information to the model."""
def after_train_iter(self, runner, batch_idx: int, data_batch: None,
outputs: None) -> None:
if batch_idx >= 5:
raise RuntimeError('quick exit')
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir):
from hubble.executor import HubExecutor, helper, hubapi
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
@pytest.mark.parametrize('uses', ['jinahub://hello', 'jinaai://jina-ai/hello'])
def test_use_from_local_hub_deployment_level(
mocker, monkeypatch, local_hub_executor, uses
):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
*args,
**kwargs,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', uses])
with Deployment(a):
pass
@pytest.mark.parametrize('uses', ['jinahub://hello', 'jinaai://jina-ai/hello'])
def test_use_from_local_hub_flow_level(mocker, monkeypatch, local_hub_executor, uses):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
*args,
**kwargs,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses=uses, install_requirements=True):
pass
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir):
from hubble.executor import HubExecutor, helper, hubapi
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
@pytest.mark.parametrize('uses', ['jinahub://hello', 'jinaai://jina-ai/hello'])
def test_use_from_local_hub_deployment_level(
mocker, monkeypatch, local_hub_executor, uses
):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
prefer_platform=None,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', uses])
with Deployment(a):
pass
@pytest.mark.parametrize('uses', ['jinahub://hello', 'jinaai://jina-ai/hello'])
def test_use_from_local_hub_flow_level(mocker, monkeypatch, local_hub_executor, uses):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
prefer_platform=None,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses=uses, install_requirements=True):
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from .vis_backend import (BaseVisBackend, ClearMLVisBackend, LocalVisBackend,
MLflowVisBackend, NeptuneVisBackend,
TensorboardVisBackend, WandbVisBackend)
from .visualizer import Visualizer
__all__ = [
'Visualizer', 'BaseVisBackend', 'LocalVisBackend', 'WandbVisBackend',
'TensorboardVisBackend', 'MLflowVisBackend', 'ClearMLVisBackend',
'NeptuneVisBackend'
]
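# Hedged usage sketch: a `Visualizer` is commonly constructed with one of the
# exported backends; the keyword arguments below follow typical MMEngine usage
# and are assumptions, not taken from this package.
#
#   vis = Visualizer(vis_backends=[dict(type='LocalVisBackend')],
#                    save_dir='./vis_out')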
|
# Copyright (c) OpenMMLab. All rights reserved.
from .vis_backend import (BaseVisBackend, ClearMLVisBackend, LocalVisBackend,
MLflowVisBackend, TensorboardVisBackend,
WandbVisBackend)
from .visualizer import Visualizer
__all__ = [
'Visualizer', 'BaseVisBackend', 'LocalVisBackend', 'WandbVisBackend',
'TensorboardVisBackend', 'MLflowVisBackend', 'ClearMLVisBackend'
]
|