input (string, length 33–5k) | output (string, length 32–5k)
---|---
import functools
import os
import os.path
import pathlib
from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import EncodedData, EncodedImage
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Label
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _prepare_sample(
data: Tuple[str, BinaryIO],
*,
root: pathlib.Path,
categories: List[str],
) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).relative_to(root).parts[0]
return dict(
path=path,
data=EncodedData.from_file(buffer),
label=Label.from_category(category, categories=categories),
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
dp = FileOpener(dp, mode="rb")
return Mapper(dp, functools.partial(_prepare_sample, root=root, categories=categories)), categories
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = EncodedImage(sample.pop("data").data)
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
|
import functools
import os
import os.path
import pathlib
from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import EncodedData, EncodedImage, Label
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _prepare_sample(
data: Tuple[str, BinaryIO],
*,
root: pathlib.Path,
categories: List[str],
) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).relative_to(root).parts[0]
return dict(
path=path,
data=EncodedData.from_file(buffer),
label=Label.from_category(category, categories=categories),
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
dp = FileOpener(dp, mode="rb")
return Mapper(dp, functools.partial(_prepare_sample, root=root, categories=categories)), categories
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = EncodedImage(sample.pop("data").data)
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
|
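A minimal usage sketch for the two builders above, assuming an ImageFolder-style layout (one sub-directory per category); the directory path is a placeholder.

# Hedged usage sketch: "~/datasets/pets" is a placeholder directory with an
# ImageFolder-style layout (root/<category>/<image files>).
dp, categories = from_image_folder("~/datasets/pets")
print(categories)
for sample in dp:
    # Each sample is a dict with "path", "image" (EncodedImage) and "label".
    print(sample["path"], sample["label"])
    break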
from hubble.executor.hubio import HubIO
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_pod_parser
def test_container_pod(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_pull(self):
return 'docker://jinahub/dummy_executor'
monkeypatch.setattr(HubIO, 'pull', _mock_pull)
args = set_pod_parser().parse_args(['--uses', 'jinahub+docker://DummyExecutor'])
pod = PodFactory.build_pod(args)
assert pod.args.uses == 'docker://jinahub/dummy_executor'
assert pod.name == 'ContainerPod'
|
from jina.orchestrate.pods.factory import PodFactory
from jina.hubble.hubio import HubIO
from jina.parsers import set_pod_parser
def test_container_pod(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_pull(self):
return 'docker://jinahub/dummy_executor'
monkeypatch.setattr(HubIO, 'pull', _mock_pull)
args = set_pod_parser().parse_args(['--uses', 'jinahub+docker://DummyExecutor'])
pod = PodFactory.build_pod(args)
assert pod.args.uses == 'docker://jinahub/dummy_executor'
assert pod.name == 'ContainerPod'
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.structures import InstanceData
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('visualizer')
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, {}, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertEqual(hook._test_index, 2)
# test
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.data import InstanceData
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('visualizer')
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
self.data_batch = [{'data_sample': data_sample}] * 2
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, self.data_batch, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertEqual(hook._test_index, 2)
# test
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
|
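A quick sanity check of the `_rand_bboxes` helper used in the tests above: corners come out ordered and clipped to the image extent (shapes are illustrative).

import torch

torch.manual_seed(0)
boxes = _rand_bboxes(num_boxes=3, h=10, w=12)
print(boxes.shape)                         # torch.Size([3, 4]) -> (tl_x, tl_y, br_x, br_y)
print((boxes[:, 0] <= boxes[:, 2]).all())  # tensor(True): tl_x <= br_x
print((boxes[:, 3] <= 10).all())           # tensor(True): br_y clipped to image height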
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval
__all__ = ['COCO', 'COCOeval']
|
from .coco_api import COCO, COCOeval
__all__ = ['COCO', 'COCOeval']
|
__version__ = '0.1.0'
from docarray.array import DocumentArray
from docarray.document.document import BaseDocument as Document
from docarray.predefined_document import Image, Text
__all__ = ['Document', 'DocumentArray', 'Image', 'Text']
|
__version__ = '0.18.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.attention.attention import Attention
@keras_export("keras.layers.AdditiveAttention")
class AdditiveAttention(Attention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none is
supplied, `value` will be used as `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)` as a non-linear sum
`scores = reduce_sum(tanh(query + key), axis=-1)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call arguments:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=True,
dropout=0.0,
**kwargs,
):
super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)
def build(self, input_shape):
self._validate_inputs(input_shape)
dim = input_shape[0][-1]
self.scale = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
self.built = True
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
scale = self.scale if self.use_scale else 1.0
return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
base_config = super().get_config()
del base_config["score_mode"]
return base_config
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.attention.attention import Attention
@keras_export("keras.layers.AdditiveAttention")
class AdditiveAttention(Attention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none is
supplied, `value` will be used as `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)` as a non-linear sum
`scores = reduce_sum(tanh(query + key), axis=-1)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call Args:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=True,
dropout=0.0,
**kwargs,
):
super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)
def build(self, input_shape):
self._validate_inputs(input_shape)
dim = input_shape[0][-1]
self.scale = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
self.built = True
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
scale = self.scale if self.use_scale else 1.0
return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
base_config = super().get_config()
del base_config["score_mode"]
return base_config
|
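A brief call sketch for the layer documented above; the shapes and random inputs are illustrative placeholders.

import numpy as np
from keras import layers

query = np.random.random((2, 3, 8)).astype("float32")  # (batch_size, Tq, dim)
value = np.random.random((2, 4, 8)).astype("float32")  # (batch_size, Tv, dim)

attention = layers.AdditiveAttention(use_scale=True, dropout=0.0)
# With no explicit key, `value` is used as both key and value.
output, scores = attention([query, value], return_attention_scores=True)
print(output.shape)  # (2, 3, 8)
print(scores.shape)  # (2, 3, 4)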
# training schedule for 2x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
# training schedule for 2x
train_cfg = dict(by_epoch=True, max_epochs=24)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
import torch
from torch import nn
from typing import List
import os
import json
class LSTM(nn.Module):
"""
Bidirectional LSTM running over word embeddings.
"""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"))
model = LSTM(**config)
model.load_state_dict(weights)
return model
|
import torch
from torch import nn
from typing import List
import os
import json
class LSTM(nn.Module):
"""
Bidirectional LSTM running over word embeddings.
"""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"))
model = LSTM(**config)
model.load_state_dict(weights)
return model
|
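A forward-pass sketch for the module above, with assumed toy shapes (2 padded sentences, max length 5, 16-dimensional word embeddings).

import torch

lstm = LSTM(word_embedding_dimension=16, hidden_dim=32)
features = {
    "token_embeddings": torch.randn(2, 5, 16),  # padded word embeddings
    "sentence_lengths": torch.tensor([5, 3]),   # true lengths per sentence
}
out = lstm(features)
# Bidirectional encoding doubles hidden_dim: (batch, seq_len, 64).
print(out["token_embeddings"].shape)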
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from .base_data_element import BaseDataElement
class LabelData(BaseDataElement):
"""Data structure for label-level annnotations or predictions."""
@staticmethod
def onehot_to_label(onehot: torch.Tensor) -> torch.Tensor:
"""Convert the one-hot input to label.
Args:
onehot (torch.Tensor): The one-hot input. The format
of input must be one-hot.
Return:
torch.Tensor: The converted results.
"""
assert isinstance(onehot, torch.Tensor)
if (onehot.ndim == 1 and onehot.max().item() <= 1
and onehot.min().item() >= 0):
return onehot.nonzero().squeeze()
else:
raise ValueError(
'input is not one-hot and can not convert to label')
@staticmethod
def label_to_onehot(label: torch.Tensor, num_classes: int) -> torch.Tensor:
"""Convert the label-format input to one-hot.
Args:
label (torch.Tensor): The label-format input. The format
of item must be label-format.
num_classes (int): The number of classes.
Return:
torch.Tensor: The converted results.
"""
assert isinstance(label, torch.Tensor)
onehot = label.new_zeros((num_classes, ))
assert max(label, default=torch.tensor(0)).item() < num_classes
onehot[label] = 1
return onehot
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from .base_data_element import BaseDataElement
class LabelData(BaseDataElement):
"""Data structure for label-level annnotations or predictions."""
@staticmethod
def onehot_to_label(onehot: torch.Tensor) -> torch.Tensor:
"""Convert the one-hot input to label.
Args:
onehot (torch.Tensor): The one-hot input. The format
of input must be one-hot.
Return:
torch.Tensor: The converted results.
"""
assert isinstance(onehot, torch.Tensor)
if (onehot.ndim == 1 and onehot.max().item() <= 1
and onehot.min().item() >= 0):
return onehot.nonzero().squeeze()
else:
raise ValueError(
'input is not one-hot and can not convert to label')
@staticmethod
def label_to_onehot(label: torch.Tensor, num_classes: int) -> torch.Tensor:
"""Convert the label-format input to one-hot.
Args:
label (torch.Tensor): The label-format input. The format
of item must be label-format.
num_classes (int): The number of classes.
Return:
torch.Tensor: The converted results.
"""
assert isinstance(label, torch.Tensor)
onehot = torch.zeros((num_classes, ), dtype=torch.int64)
assert label.max().item() < num_classes
onehot[label] = 1
return onehot
|
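A round-trip sketch with the two static helpers above, using a toy label tensor and 4 classes.

import torch

label = torch.tensor([0, 2])
onehot = LabelData.label_to_onehot(label, num_classes=4)  # tensor([1, 0, 1, 0])
recovered = LabelData.onehot_to_label(onehot)             # tensor([0, 2])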
"""Prompt display utils."""
from llama_index.core.prompts.mixin import PromptDictType
# define prompt viewing function
def display_prompt_dict(prompts_dict: PromptDictType) -> None:
"""
Display prompt dict.
Args:
prompts_dict: prompt dict
"""
from IPython.display import Markdown, display
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
|
"""Prompt display utils."""
from llama_index.core.prompts.mixin import PromptDictType
# define prompt viewing function
def display_prompt_dict(prompts_dict: PromptDictType) -> None:
"""
Display prompt dict.
Args:
prompts_dict: prompt dict
"""
from IPython.display import Markdown, display
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
|
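A small usage sketch: the function accepts any prompt mapping, for example one built by hand from a `PromptTemplate` (the template text here is a placeholder).

from llama_index.core import PromptTemplate

prompts = {"qa_prompt": PromptTemplate("Answer the question: {query_str}")}
display_prompt_dict(prompts)  # renders the key as Markdown and prints the template text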
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""
Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, int]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, int]:
# Extract function calls and placeholders
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
inputs = json.loads("[" + inputs + "]")
graph.add_node(output, func_name=func_name, inputs=inputs)
for inp in inputs:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
# look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
tool_output = await tools_by_name[func_name].acall(*input_values)
tool_outputs.append(tool_output)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = tool_output.raw_output
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
solution = solution.replace(f"{placeholder}", '"' + str(value) + '"')
return solution, tool_outputs
|
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""
Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, int]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, int]:
# Extract function calls and placeholders
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
inputs = json.loads("[" + inputs + "]")
graph.add_node(output, func_name=func_name, inputs=inputs)
for inp in inputs:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
# look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
raw_tool_output = await tools_by_name[func_name].acall(
*input_values
)
tool_outputs.append(
ToolOutput(
content=str(raw_tool_output),
tool_name=func_name,
raw_output=raw_tool_output,
raw_input={"args": input_values},
is_error=False,
)
)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = str(raw_tool_output)
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
solution = solution.replace(f"{placeholder}", '"' + str(value) + '"')
return solution, tool_outputs
|
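To make the placeholder format concrete, this is what the parser's regex extracts from a solution string (the example text is hypothetical; quoted placeholders keep the inputs JSON-parseable).

import re

solution = 'The sum is [FUNC add(3, 4) = y1]; doubled gives [FUNC multiply("y1", 2) = y2].'
print(re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution))
# [('add', '3, 4', 'y1'), ('multiply', '"y1", 2', 'y2')]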
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, these
proxy variables are unset before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, these
proxy variables are unset before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import unittest
import torch
from mmengine.structures import PixelData
from mmengine.testing import assert_allclose
from mmdet.models.seg_heads import PanopticFPNHead
from mmdet.structures import DetDataSample
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
import unittest
import torch
from mmengine.structures import PixelData
from mmengine.testing import assert_allclose
from mmdet.models.seg_heads import PanopticFPNHead
from mmdet.structures import DetDataSample
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=1,
inner_channels=1)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
"""Tests for the InMemoryStore class."""
import pytest
from langchain_core.stores import InMemoryStore
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
class TestInMemoryStore(BaseStoreSyncTests[str]):
@pytest.fixture
def three_values(self) -> tuple[str, str, str]:
return "foo", "bar", "buzz"
@pytest.fixture
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
class TestInMemoryStoreAsync(BaseStoreAsyncTests[str]):
@pytest.fixture
def three_values(self) -> tuple[str, str, str]:
return "foo", "bar", "buzz"
@pytest.fixture
async def kv_store(self) -> InMemoryStore:
return InMemoryStore()
|
"""Tests for the InMemoryStore class."""
import pytest
from langchain_core.stores import InMemoryStore
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
class TestInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def three_values(self) -> tuple[str, str, str]:
return "foo", "bar", "buzz"
@pytest.fixture
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
class TestInMemoryStoreAsync(BaseStoreAsyncTests):
@pytest.fixture
def three_values(self) -> tuple[str, str, str]: # type: ignore
return "foo", "bar", "buzz"
@pytest.fixture
async def kv_store(self) -> InMemoryStore:
return InMemoryStore()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import serialize
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lamb import Lamb
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import serialize
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEF1Evaluator, CESoftmaxAccuracyEvaluator
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# As the dataset, we use SNLI + MultiNLI
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row["label"]]
if row["split"] == "train":
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
else:
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder("distilroberta-base", num_labels=len(label2int))
# We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# During training, we use CESoftmaxAccuracyEvaluator and CEF1Evaluator to measure the performance on the dev set
accuracy_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="AllNLI-dev")
f1_evaluator = CEF1Evaluator.from_input_examples(dev_samples, name="AllNLI-dev")
evaluator = SequentialEvaluator([accuracy_evaluator, f1_evaluator])
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEF1Evaluator, CESoftmaxAccuracyEvaluator
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# As the dataset, we use SNLI + MultiNLI
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row["label"]]
if row["split"] == "train":
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
else:
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder("distilroberta-base", num_labels=len(label2int))
# We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# During training, we use CESoftmaxAccuracyEvaluator and CEF1Evaluator to measure the performance on the dev set
accuracy_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="AllNLI-dev")
f1_evaluator = CEF1Evaluator.from_input_examples(dev_samples, name="AllNLI-dev")
evaluator = SequentialEvaluator([accuracy_evaluator, f1_evaluator])
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
|
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
|
"""Semi-supervised learning algorithms.
These algorithms utilize small amounts of labeled data and large amounts of unlabeled
data for classification tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._label_propagation import LabelPropagation, LabelSpreading
from ._self_training import SelfTrainingClassifier
__all__ = ["LabelPropagation", "LabelSpreading", "SelfTrainingClassifier"]
|
"""Semi-supervised learning algorithms.
These algorithms utilize small amounts of labeled data and large amounts of unlabeled
data for classification tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._label_propagation import LabelPropagation, LabelSpreading
from ._self_training import SelfTrainingClassifier
__all__ = ["SelfTrainingClassifier", "LabelPropagation", "LabelSpreading"]
|
"""Hatena Blog reader."""
from typing import Dict, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
ATOM_PUB_ENTRY_URL = "{root_endpoint}/entry"
class Article:
def __init__(self) -> None:
self.title = ""
self.content = ""
self.published = ""
self.url = ""
class HatenaBlogReader(BaseReader):
"""
Hatena Blog reader.
Args:
root_endpoint (str): AtomPub root endpoint.
api_key (str): AtomPub API Key
username (str): Hatena ID
"""
def __init__(self, root_endpoint: str, api_key: str, username: str) -> None:
"""Initialize Hatena Blog reader."""
self.root_endpoint = root_endpoint
self.api_key = api_key
self.username = username
def load_data(self) -> List[Document]:
results = []
articles = self.get_all_articles()
for a in articles:
results.append(
Document(
text=a.content,
extra_info={
"title": a.title,
"published": a.published,
"url": a.url,
},
)
)
return results
def get_all_articles(self) -> List[Article]:
articles: List[Article] = []
page_url = ATOM_PUB_ENTRY_URL.format(root_endpoint=self.root_endpoint)
while True:
res = self.get_articles(page_url)
articles += res.get("articles")
page_url = res.get("next_page")
if page_url is None:
break
return articles
def get_articles(self, url: str) -> Dict:
import requests
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth
articles: List[Article] = []
next_page = None
res = requests.get(url, auth=HTTPBasicAuth(self.username, self.api_key))
soup = BeautifulSoup(res.text, "xml")
for entry in soup.find_all("entry"):
if entry.find("app:control").find("app:draft").string == "yes":
continue
article = Article()
article.title = entry.find("title").string
article.published = entry.find("published").string
article.url = entry.find("link", rel="alternate")["href"]
content = entry.find("content")
if content.get("type") == "text/html":
article.content = (
BeautifulSoup(entry.find("content").string, "html.parser")
.get_text()
.strip()
)
else:
article.content = entry.find("content").string.strip()
articles.append(article)
next = soup.find("link", attrs={"rel": "next"})
if next:
next_page = next.get("href")
return {"articles": articles, "next_page": next_page}
|
"""Hatena Blog reader."""
from typing import Dict, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
ATOM_PUB_ENTRY_URL = "{root_endpoint}/entry"
class Article:
def __init__(self) -> None:
self.title = ""
self.content = ""
self.published = ""
self.url = ""
class HatenaBlogReader(BaseReader):
"""Hatena Blog reader.
Args:
root_endpoint (str): AtomPub root endpoint.
        api_key (str): AtomPub API key.
        username (str): Hatena ID.
"""
def __init__(self, root_endpoint: str, api_key: str, username: str) -> None:
"""Initialize Hatena Blog reader."""
self.root_endpoint = root_endpoint
self.api_key = api_key
self.username = username
def load_data(self) -> List[Document]:
results = []
articles = self.get_all_articles()
for a in articles:
results.append(
Document(
text=a.content,
extra_info={
"title": a.title,
"published": a.published,
"url": a.url,
},
)
)
return results
def get_all_articles(self) -> List[Article]:
articles: List[Article] = []
page_url = ATOM_PUB_ENTRY_URL.format(root_endpoint=self.root_endpoint)
while True:
res = self.get_articles(page_url)
articles += res.get("articles")
page_url = res.get("next_page")
if page_url is None:
break
return articles
def get_articles(self, url: str) -> Dict:
import requests
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth
articles: List[Article] = []
next_page = None
res = requests.get(url, auth=HTTPBasicAuth(self.username, self.api_key))
soup = BeautifulSoup(res.text, "xml")
for entry in soup.find_all("entry"):
if entry.find("app:control").find("app:draft").string == "yes":
continue
article = Article()
article.title = entry.find("title").string
article.published = entry.find("published").string
article.url = entry.find("link", rel="alternate")["href"]
content = entry.find("content")
if content.get("type") == "text/html":
article.content = (
BeautifulSoup(entry.find("content").string, "html.parser")
.get_text()
.strip()
)
else:
article.content = entry.find("content").string.strip()
articles.append(article)
next = soup.find("link", attrs={"rel": "next"})
if next:
next_page = next.get("href")
return {"articles": articles, "next_page": next_page}
|
from typing import Dict, List, Optional, Callable
from jina.importer import ImportExtensions
from jina.types.request.data import DataRequest
from jina import DocumentArray
from jina._docarray import docarray_v2
if docarray_v2:
from docarray import DocList
def get_fastapi_app(
request_models_map: Dict,
caller: Callable,
**kwargs
):
"""
Get the app from FastAPI as the REST interface.
    :param request_models_map: Map describing the endpoints and their Pydantic models
:param caller: Callable to be handled by the endpoints of the returned FastAPI app
:param kwargs: Extra kwargs to make it compatible with other methods
:return: fastapi app
"""
with ImportExtensions(required=True):
from fastapi import FastAPI, Response, HTTPException
import pydantic
from jina.proto import jina_pb2
app = FastAPI()
def add_route(endpoint_path, input_model, output_model, input_doc_list_model=None, output_doc_list_model=None):
@app.api_route(
path=f'/{endpoint_path.strip("/")}',
methods=['POST'],
summary=f'Endpoint {endpoint_path}',
response_model=output_model
)
async def post(body: input_model, response: Response):
req = DataRequest()
if not docarray_v2:
req.data.docs = DocumentArray.from_pydantic_model(body.data)
else:
req.data.docs = DocList[input_doc_list_model](body.data)
req.parameters = body.parameters
req.header.exec_endpoint = endpoint_path
resp = await caller(req)
status = resp.header.status
if status.code == jina_pb2.StatusProto.ERROR:
raise HTTPException(status_code=499, detail=status.description)
else:
if not docarray_v2:
docs_response = resp.docs.to_dict()
else:
docs_response = resp.docs._data
return output_model(data=docs_response, parameters=resp.parameters)
for endpoint, input_output_map in request_models_map.items():
if endpoint != '_jina_dry_run_':
input_doc_model = input_output_map['input']['model']
output_doc_model = input_output_map['output']['model']
endpoint_input_model = pydantic.create_model(
f'{endpoint.strip("/")}_input_model',
data=(List[input_doc_model], []),
parameters=(Optional[Dict], None)
)
endpoint_output_model = pydantic.create_model(
f'{endpoint.strip("/")}_output_model',
data=(List[output_doc_model], []),
parameters=(Optional[Dict], None)
)
add_route(endpoint,
input_model=endpoint_input_model,
output_model=endpoint_output_model,
input_doc_list_model=input_doc_model,
output_doc_list_model=output_doc_model)
from jina.serve.runtimes.gateway.health_model import JinaHealthModel
@app.get(
path='/',
summary='Get the health of Jina Executor service',
response_model=JinaHealthModel,
)
async def _executor_health():
"""
        Get the health of this Executor service.
.. # noqa: DAR201
"""
return {}
return app
|
from typing import Dict, List, Optional, Callable
from jina.importer import ImportExtensions
from jina.types.request.data import DataRequest
from jina import DocumentArray
def get_fastapi_app(
request_models_map: Dict,
caller: Callable,
**kwargs
):
"""
Get the app from FastAPI as the REST interface.
    :param request_models_map: Map describing the endpoints and their Pydantic models
:param caller: Callable to be handled by the endpoints of the returned FastAPI app
:param kwargs: Extra kwargs to make it compatible with other methods
:return: fastapi app
"""
with ImportExtensions(required=True):
from fastapi import FastAPI, Response, HTTPException
import pydantic
from jina.proto import jina_pb2
app = FastAPI()
def add_route(endpoint_path, input_model, output_model):
@app.api_route(
path=f'/{endpoint_path.strip("/")}',
methods=['POST'],
summary=f'Endpoint {endpoint_path}',
response_model=output_model
)
async def post(body: input_model, response: Response):
req = DataRequest()
req.data.docs = DocumentArray.from_pydantic_model(body.data)
req.parameters = body.parameters
req.header.exec_endpoint = endpoint_path
resp = await caller(req)
status = resp.header.status
if status.code == jina_pb2.StatusProto.ERROR:
raise HTTPException(status_code=499, detail=status.description)
else:
return output_model(data=resp.docs.to_dict(), parameters=resp.parameters)
for endpoint, input_output_map in request_models_map.items():
if endpoint != '_jina_dry_run_':
input_doc_model = input_output_map['input']['model']
output_doc_model = input_output_map['output']['model']
endpoint_input_model = pydantic.create_model(
f'{endpoint.strip("/")}_input_model',
data=(List[input_doc_model], []),
parameters=(Optional[Dict], None)
)
endpoint_output_model = pydantic.create_model(
f'{endpoint.strip("/")}_output_model',
data=(List[output_doc_model], []),
parameters=(Optional[Dict], None)
)
add_route(endpoint, input_model=endpoint_input_model, output_model=endpoint_output_model)
from jina.serve.runtimes.gateway.models import JinaHealthModel
@app.get(
path='/',
summary='Get the health of Jina Executor service',
response_model=JinaHealthModel,
)
async def _executor_health():
"""
        Get the health of this Executor service.
.. # noqa: DAR201
"""
return {}
return app
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .mask_hungarian_assigner import MaskHungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'MaskHungarianAssigner'
]
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
def __init__(
self,
path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths, dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class AbstractDatasetInputStream(ABC):
def __init__(
self,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, IterableDataset]:
pass
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths, dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class AbstractDatasetInputStream(ABC):
def __init__(
self,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, IterableDataset]:
pass
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for Granite Speech."""
from typing import List, Union
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...tokenization_utils import PreTokenizedInput, TextInput
from ...utils import is_torch_available, logging
from ...utils.import_utils import requires_backends
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class GraniteSpeechProcessor(ProcessorMixin):
attributes = ["audio_processor", "tokenizer"]
audio_processor_class = "GraniteSpeechFeatureExtractor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
audio_processor,
tokenizer,
audio_token="<|audio|>",
chat_template=None,
):
self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
super().__init__(audio_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
audio: Union["torch.Tensor", List["torch.Tensor"]] = None,
device: str = "cpu",
images=None,
videos=None,
**kwargs,
) -> BatchFeature:
requires_backends(self, ["torch"])
text = self._get_validated_text(text)
prompt_strings = text
if audio is not None:
# NOTE - we intentionally avoid throwing for potentially misaligned
# text / audio inputs here because some inference engines will
# trigger the conditions due to the way they call multimodal
# processors, e.g., vLLM.
audio_inputs = self.audio_processor(audio, device=device)
            # TODO (@alex-jw-brooks): we should add a util to get_num_audio_tokens
# from feature lengths and call it here, rather than returning it
# from the feature extractor.
audio_embed_sizes = audio_inputs.pop("audio_embed_sizes")
# Expand the audio placeholders to match the feature dims; this
# is similar to how many VLMs handle image tokens, e.g., llava next
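            # For illustration (hypothetical sizes, not from the original code): with
            # audio_embed_sizes = [3], the text "<|audio|> hello" becomes
            # "<|audio|><|audio|><|audio|> hello" after the placeholder round-trip below.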
prompt_strings = []
num_replaced = 0
for sample in text:
while self.audio_token in sample:
sample = sample.replace(
self.audio_token,
"<placeholder>" * audio_embed_sizes[num_replaced],
1,
)
num_replaced += 1
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.audio_token) for sample in prompt_strings]
else:
audio_inputs = {}
text_inputs = self.tokenizer(prompt_strings, padding=True, **kwargs)
return BatchFeature(data={**text_inputs, **audio_inputs})
def _get_validated_text(self, text: Union[str, list]) -> List[str]:
if isinstance(text, str):
return [text]
elif isinstance(text, list) and isinstance(text[0], str):
return text
raise TypeError("Invalid text provided! Text should be a string or list of strings.")
__all__ = ["GraniteSpeechProcessor"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for Granite Speech."""
from typing import List, Union
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...tokenization_utils import PreTokenizedInput, TextInput
from ...utils import is_torch_available, logging
from ...utils.import_utils import requires_backends
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class GraniteSpeechProcessor(ProcessorMixin):
attributes = ["audio_processor", "tokenizer"]
valid_kwargs = ["audio_token"]
audio_processor_class = "GraniteSpeechFeatureExtractor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
audio_processor,
tokenizer,
audio_token="<|audio|>",
chat_template=None,
):
self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
super().__init__(audio_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
audio: Union["torch.Tensor", List["torch.Tensor"]] = None,
device: str = "cpu",
images=None,
videos=None,
**kwargs,
) -> BatchFeature:
requires_backends(self, ["torch"])
text = self._get_validated_text(text)
prompt_strings = text
if audio is not None:
# NOTE - we intentionally avoid throwing for potentially misaligned
# text / audio inputs here because some inference engines will
# trigger the conditions due to the way they call multimodal
# processors, e.g., vLLM.
audio_inputs = self.audio_processor(audio, device=device)
            # TODO (@alex-jw-brooks): we should add a util to get_num_audio_tokens
# from feature lengths and call it here, rather than returning it
# from the feature extractor.
audio_embed_sizes = audio_inputs.pop("audio_embed_sizes")
# Expand the audio placeholders to match the feature dims; this
# is similar to how many VLMs handle image tokens, e.g., llava next
prompt_strings = []
num_replaced = 0
for sample in text:
while self.audio_token in sample:
sample = sample.replace(
self.audio_token,
"<placeholder>" * audio_embed_sizes[num_replaced],
1,
)
num_replaced += 1
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.audio_token) for sample in prompt_strings]
else:
audio_inputs = {}
text_inputs = self.tokenizer(prompt_strings, padding=True, **kwargs)
return BatchFeature(data={**text_inputs, **audio_inputs})
def _get_validated_text(self, text: Union[str, list]) -> List[str]:
if isinstance(text, str):
return [text]
elif isinstance(text, list) and isinstance(text[0], str):
return text
raise TypeError("Invalid text provided! Text should be a string or list of strings.")
__all__ = ["GraniteSpeechProcessor"]
|
"""Test chat model integration using standard integration tests."""
from typing import Type
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_ollama.chat_models import ChatOllama
class TestChatOllama(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3.1"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
|
"""Test chat model integration using standard integration tests."""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_ollama.chat_models import ChatOllama
class TestChatOllama(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3.1"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@pytest.mark.xfail(
reason=(
"Fails with 'AssertionError'. Ollama does not support 'tool_choice' yet."
)
)
def test_structured_output(self, model: BaseChatModel, schema_type: str) -> None:
super().test_structured_output(model, schema_type)
@pytest.mark.xfail(
reason=(
"Fails with 'AssertionError'. Ollama does not support 'tool_choice' yet."
)
)
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
super().test_structured_output_pydantic_2_v1(model)
|
import json
import logging
import os
from typing import Dict, Optional
import fsspec
from llama_index.core.storage.kvstore.types import (
DEFAULT_COLLECTION,
BaseInMemoryKVStore,
)
logger = logging.getLogger(__name__)
DATA_TYPE = Dict[str, Dict[str, dict]]
class SimpleKVStore(BaseInMemoryKVStore):
"""
Simple in-memory Key-Value store.
Args:
data (Optional[DATA_TYPE]): data to initialize the store with
"""
def __init__(
self,
data: Optional[DATA_TYPE] = None,
) -> None:
"""Init a SimpleKVStore."""
self._data: DATA_TYPE = data or {}
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
"""Put a key-value pair into the store."""
if collection not in self._data:
self._data[collection] = {}
self._data[collection][key] = val.copy()
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
"""Put a key-value pair into the store."""
self.put(key, val, collection)
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""Get a value from the store."""
collection_data = self._data.get(collection, None)
if not collection_data:
return None
if key not in collection_data:
return None
return collection_data[key].copy()
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""Get a value from the store."""
return self.get(key, collection)
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self._data.get(collection, {}).copy()
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self.get_all(collection)
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
try:
self._data[collection].pop(key)
return True
except KeyError:
return False
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
return self.delete(key, collection)
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
"""Persist the store."""
fs = fs or fsspec.filesystem("file")
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
f.write(json.dumps(self._data))
@classmethod
def from_persist_path(
cls, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> "SimpleKVStore":
"""Load a SimpleKVStore from a persist path and filesystem."""
fs = fs or fsspec.filesystem("file")
logger.debug(f"Loading {__name__} from {persist_path}.")
with fs.open(persist_path, "rb") as f:
data = json.load(f)
return cls(data)
def to_dict(self) -> dict:
"""Save the store as dict."""
return self._data
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleKVStore":
"""Load a SimpleKVStore from dict."""
return cls(save_dict)
|
import json
import logging
import os
from typing import Dict, Optional
import fsspec
from llama_index.core.storage.kvstore.types import (
DEFAULT_COLLECTION,
BaseInMemoryKVStore,
)
logger = logging.getLogger(__name__)
DATA_TYPE = Dict[str, Dict[str, dict]]
class SimpleKVStore(BaseInMemoryKVStore):
"""Simple in-memory Key-Value store.
Args:
data (Optional[DATA_TYPE]): data to initialize the store with
"""
def __init__(
self,
data: Optional[DATA_TYPE] = None,
) -> None:
"""Init a SimpleKVStore."""
self._data: DATA_TYPE = data or {}
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
"""Put a key-value pair into the store."""
if collection not in self._data:
self._data[collection] = {}
self._data[collection][key] = val.copy()
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
"""Put a key-value pair into the store."""
self.put(key, val, collection)
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""Get a value from the store."""
collection_data = self._data.get(collection, None)
if not collection_data:
return None
if key not in collection_data:
return None
return collection_data[key].copy()
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""Get a value from the store."""
return self.get(key, collection)
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self._data.get(collection, {}).copy()
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self.get_all(collection)
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
try:
self._data[collection].pop(key)
return True
except KeyError:
return False
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
return self.delete(key, collection)
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
"""Persist the store."""
fs = fs or fsspec.filesystem("file")
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
f.write(json.dumps(self._data))
@classmethod
def from_persist_path(
cls, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> "SimpleKVStore":
"""Load a SimpleKVStore from a persist path and filesystem."""
fs = fs or fsspec.filesystem("file")
logger.debug(f"Loading {__name__} from {persist_path}.")
with fs.open(persist_path, "rb") as f:
data = json.load(f)
return cls(data)
def to_dict(self) -> dict:
"""Save the store as dict."""
return self._data
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleKVStore":
"""Load a SimpleKVStore from dict."""
return cls(save_dict)
|
import re
from io import BytesIO
from pathlib import Path
from typing import Any, Type
import numpy as np
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_loaders.parsers.images import (
LLMImageBlobParser,
RapidOCRBlobParser,
TesseractBlobParser,
)
path_base = Path(__file__).parent.parent.parent
building_image = Blob.from_path(path_base / "examples/building.jpg")
text_image = Blob.from_path(path_base / "examples/text.png")
page_image = Blob.from_path(path_base / "examples/page.png")
_re_in_image = r"(?ms).*MAKE.*TEXT.*STAND.*OUT.*FROM.*"
@pytest.mark.parametrize(
"blob,body",
[
(Blob.from_path(path_base / "examples/text-gray.png"), _re_in_image),
],
)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_differents_files(
blob_loader: Type,
kw: dict[str, Any],
blob: Blob,
body: str,
) -> None:
if blob_loader == LLMImageBlobParser and "building" in str(blob.path):
body = ".*building.*"
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
assert re.compile(body).match(documents[0].page_content)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_numpy(
blob_loader: Type,
kw: dict[str, Any],
) -> None:
gray_image = np.empty(shape=(412, 1652, 1))
with BytesIO() as buffer:
np.save(buffer, gray_image)
buffer.seek(0)
npy_bytes = buffer.getvalue()
blob = Blob.from_data(npy_bytes, mime_type="application/x-npy")
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
|
import re
from pathlib import Path
from typing import Any, Type
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_loaders.parsers.images import (
LLMImageBlobParser,
RapidOCRBlobParser,
TesseractBlobParser,
)
path_base = Path(__file__).parent.parent.parent
building_image = Blob.from_path(path_base / "examples/building.jpg")
text_image = Blob.from_path(path_base / "examples/text.png")
page_image = Blob.from_path(path_base / "examples/page.png")
@pytest.mark.parametrize(
"blob,body",
[
(building_image, ""),
(text_image, r"(?ms).*MAKE.*TEXT.*STAND.*OUT.*FROM.*BACKGROUNDS.*"),
],
)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_differents_files(
blob_loader: Type,
kw: dict[str, Any],
blob: Blob,
body: str,
) -> None:
if blob_loader == LLMImageBlobParser and "building" in str(blob.path):
body = ".*building.*"
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
assert re.compile(body).match(documents[0].page_content)
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseRerankingEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseRerankingEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation")
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using the prepared samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
print(results)
print(reranking_evaluator.primary_metric)
print(results[reranking_evaluator.primary_metric])
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .videofolder import videofolder
from .webdataset import webdataset
from .xml import xml
def _hash_python_lines(lines: list[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
"xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: dict[str, tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
".xml": ("xml", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder", "videofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: dict[str, list[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .videofolder import videofolder
from .webdataset import webdataset
from .xml import xml
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
"xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
".xml": ("xml", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder", "videofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
@deprecated(
since="0.0.30",
removal="1.0",
alternative_import="langchain_cohere.CohereRerank",
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
msg = (
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
raise ImportError(msg)
cohere_api_key = get_from_dict_or_env(
values,
"cohere_api_key",
"COHERE_API_KEY",
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
            model: The model to use for re-ranking. Defaults to self.model.
            top_n : The number of results to return. If None, returns all results.
Defaults to self.top_n.
max_chunks_per_doc : The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
return [
{"index": res.index, "relevance_score": res.relevance_score}
for res in results
]
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
|
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank"
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
msg = (
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
raise ImportError(msg)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
            model: The model to use for re-ranking. Defaults to self.model.
            top_n : The number of results to return. If None, returns all results.
Defaults to self.top_n.
max_chunks_per_doc : The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
result_dicts = []
for res in results:
result_dicts.append(
{
"index": res.index,
"relevance_score": res.relevance_score,
}
)
return result_dicts
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='/tmp/file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='/tmp/file_1.mp4',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='/tmp/file_2.mp4')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='file_1.mp4',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='file_2.mp4')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class UpSampling3dTest(testing.TestCase):
@parameterized.product(
data_format=["channels_first", "channels_last"],
length_dim1=[2, 3],
length_dim2=[2],
length_dim3=[3],
)
@pytest.mark.requires_trainable_backend
def test_upsampling_3d(
self, data_format, length_dim1, length_dim2, length_dim3
):
num_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
if data_format == "channels_first":
inputs = np.random.rand(
num_samples,
stack_size,
input_len_dim1,
input_len_dim2,
input_len_dim3,
)
else:
inputs = np.random.rand(
num_samples,
input_len_dim1,
input_len_dim2,
input_len_dim3,
stack_size,
)
# basic test
if data_format == "channels_first":
expected_output_shape = (2, 2, 20, 22, 24)
else:
expected_output_shape = (2, 20, 22, 24, 2)
self.run_layer_test(
layers.UpSampling3D,
init_kwargs={"size": (2, 2, 2), "data_format": data_format},
input_shape=inputs.shape,
expected_output_shape=expected_output_shape,
expected_output_dtype="float32",
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
layer = layers.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
data_format=data_format,
)
layer.build(inputs.shape)
np_output = layer(inputs=backend.Variable(inputs))
if data_format == "channels_first":
assert np_output.shape[2] == length_dim1 * input_len_dim1
assert np_output.shape[3] == length_dim2 * input_len_dim2
assert np_output.shape[4] == length_dim3 * input_len_dim3
else: # tf
assert np_output.shape[1] == length_dim1 * input_len_dim1
assert np_output.shape[2] == length_dim2 * input_len_dim2
assert np_output.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if data_format == "channels_first":
expected_out = np.repeat(inputs, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else: # tf
expected_out = np.repeat(inputs, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
self.assertAllClose(np_output, expected_out)
def test_upsampling_3d_correctness(self):
input_shape = (2, 1, 2, 1, 3)
x = np.arange(np.prod(input_shape)).reshape(input_shape)
expected_output = np.array(
[
[
[
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
],
[
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
],
],
[
[
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
],
[
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
],
],
]
)
if backend.config.image_data_format() == "channels_first":
expected_output = expected_output.transpose((0, 4, 1, 2, 3))
x = x.transpose((0, 4, 1, 2, 3))
self.assertAllClose(
layers.UpSampling3D(size=(2, 2, 2))(x), expected_output
)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class UpSampling3dTest(testing.TestCase, parameterized.TestCase):
@parameterized.product(
data_format=["channels_first", "channels_last"],
length_dim1=[2, 3],
length_dim2=[2],
length_dim3=[3],
)
@pytest.mark.requires_trainable_backend
def test_upsampling_3d(
self, data_format, length_dim1, length_dim2, length_dim3
):
num_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
if data_format == "channels_first":
inputs = np.random.rand(
num_samples,
stack_size,
input_len_dim1,
input_len_dim2,
input_len_dim3,
)
else:
inputs = np.random.rand(
num_samples,
input_len_dim1,
input_len_dim2,
input_len_dim3,
stack_size,
)
# basic test
if data_format == "channels_first":
expected_output_shape = (2, 2, 20, 22, 24)
else:
expected_output_shape = (2, 20, 22, 24, 2)
self.run_layer_test(
layers.UpSampling3D,
init_kwargs={"size": (2, 2, 2), "data_format": data_format},
input_shape=inputs.shape,
expected_output_shape=expected_output_shape,
expected_output_dtype="float32",
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
layer = layers.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
data_format=data_format,
)
layer.build(inputs.shape)
np_output = layer(inputs=backend.Variable(inputs))
if data_format == "channels_first":
assert np_output.shape[2] == length_dim1 * input_len_dim1
assert np_output.shape[3] == length_dim2 * input_len_dim2
assert np_output.shape[4] == length_dim3 * input_len_dim3
else: # tf
assert np_output.shape[1] == length_dim1 * input_len_dim1
assert np_output.shape[2] == length_dim2 * input_len_dim2
assert np_output.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if data_format == "channels_first":
expected_out = np.repeat(inputs, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else: # tf
expected_out = np.repeat(inputs, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
self.assertAllClose(np_output, expected_out)
def test_upsampling_3d_correctness(self):
input_shape = (2, 1, 2, 1, 3)
x = np.arange(np.prod(input_shape)).reshape(input_shape)
expected_output = np.array(
[
[
[
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
],
[
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
],
],
[
[
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
],
[
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
],
],
]
)
if backend.config.image_data_format() == "channels_first":
expected_output = expected_output.transpose((0, 4, 1, 2, 3))
x = x.transpose((0, 4, 1, 2, 3))
self.assertAllClose(
layers.UpSampling3D(size=(2, 2, 2))(x), expected_output
)
|
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
DBSCAN (Density-Based Spatial Clustering of Applications with Noise) finds core
samples in regions of high density and expands clusters from them. This
algorithm is good for data which contains clusters of similar density.
See the :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py` example
for a demo of different clustering algorithms on 2D datasets.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We use :class:`~sklearn.datasets.make_blobs` to create 3 synthetic clusters.
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
n_samples=750, centers=centers, cluster_std=0.4, random_state=0
)
X = StandardScaler().fit_transform(X)
# %%
# We can visualize the resulting data:
import matplotlib.pyplot as plt
plt.scatter(X[:, 0], X[:, 1])
plt.show()
# %%
# Compute DBSCAN
# --------------
#
# One can access the labels assigned by :class:`~sklearn.cluster.DBSCAN` using
# the `labels_` attribute. Noisy samples are given the label :math:`-1`.
import numpy as np
from sklearn import metrics
from sklearn.cluster import DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
# %%
# Clustering algorithms are fundamentally unsupervised learning methods.
# However, since :class:`~sklearn.datasets.make_blobs` gives access to the true
# labels of the synthetic clusters, it is possible to use evaluation metrics
# that leverage this "supervised" ground truth information to quantify the
# quality of the resulting clusters. Examples of such metrics are the
# homogeneity, completeness, V-measure, Rand-Index, Adjusted Rand-Index and
# Adjusted Mutual Information (AMI).
#
# If the ground truth labels are not known, evaluation can only be performed
# using the model results itself. In that case, the Silhouette Coefficient comes
# in handy.
#
# For more information, see the
# :ref:`sphx_glr_auto_examples_cluster_plot_adjusted_for_chance_measures.py`
# example or the :ref:`clustering_evaluation` module.
print(f"Homogeneity: {metrics.homogeneity_score(labels_true, labels):.3f}")
print(f"Completeness: {metrics.completeness_score(labels_true, labels):.3f}")
print(f"V-measure: {metrics.v_measure_score(labels_true, labels):.3f}")
print(f"Adjusted Rand Index: {metrics.adjusted_rand_score(labels_true, labels):.3f}")
print(
"Adjusted Mutual Information:"
f" {metrics.adjusted_mutual_info_score(labels_true, labels):.3f}"
)
print(f"Silhouette Coefficient: {metrics.silhouette_score(X, labels):.3f}")
# %%
# Plot results
# ------------
#
# Core samples (large dots) and non-core samples (small dots) are color-coded
# according to the assigned cluster. Samples tagged as noise are represented in
# black.
unique_labels = set(labels)
core_samples_mask = np.zeros_like(labels, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=14,
)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=6,
)
plt.title(f"Estimated number of clusters: {n_clusters_}")
plt.show()
|
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
DBSCAN (Density-Based Spatial Clustering of Applications with Noise) finds core
samples in regions of high density and expands clusters from them. This
algorithm is good for data which contains clusters of similar density.
See the :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py` example
for a demo of different clustering algorithms on 2D datasets.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We use :class:`~sklearn.datasets.make_blobs` to create 3 synthetic clusters.
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
n_samples=750, centers=centers, cluster_std=0.4, random_state=0
)
X = StandardScaler().fit_transform(X)
# %%
# We can visualize the resulting data:
import matplotlib.pyplot as plt
plt.scatter(X[:, 0], X[:, 1])
plt.show()
# %%
# Compute DBSCAN
# --------------
#
# One can access the labels assigned by :class:`~sklearn.cluster.DBSCAN` using
# the `labels_` attribute. Noisy samples are given the label :math:`-1`.
import numpy as np
from sklearn import metrics
from sklearn.cluster import DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
# %%
# Clustering algorithms are fundamentally unsupervised learning methods.
# However, since :class:`~sklearn.datasets.make_blobs` gives access to the true
# labels of the synthetic clusters, it is possible to use evaluation metrics
# that leverage this "supervised" ground truth information to quantify the
# quality of the resulting clusters. Examples of such metrics are the
# homogeneity, completeness, V-measure, Rand-Index, Adjusted Rand-Index and
# Adjusted Mutual Information (AMI).
#
# If the ground truth labels are not known, evaluation can only be performed
# using the model results itself. In that case, the Silhouette Coefficient comes
# in handy.
#
# For more information, see the
# :ref:`sphx_glr_auto_examples_cluster_plot_adjusted_for_chance_measures.py`
# example or the :ref:`clustering_evaluation` module.
print(f"Homogeneity: {metrics.homogeneity_score(labels_true, labels):.3f}")
print(f"Completeness: {metrics.completeness_score(labels_true, labels):.3f}")
print(f"V-measure: {metrics.v_measure_score(labels_true, labels):.3f}")
print(f"Adjusted Rand Index: {metrics.adjusted_rand_score(labels_true, labels):.3f}")
print(
"Adjusted Mutual Information:"
f" {metrics.adjusted_mutual_info_score(labels_true, labels):.3f}"
)
print(f"Silhouette Coefficient: {metrics.silhouette_score(X, labels):.3f}")
# %%
# Plot results
# ------------
#
# Core samples (large dots) and non-core samples (small dots) are color-coded
# according to the assigned cluster. Samples tagged as noise are represented in
# black.
unique_labels = set(labels)
core_samples_mask = np.zeros_like(labels, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=14,
)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=6,
)
plt.title(f"Estimated number of clusters: {n_clusters_}")
plt.show()
|
_base_ = './faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
_base_ = './faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
"""General node utils."""
import logging
import uuid
from typing import List, Optional, Protocol, runtime_checkable
from llama_index.core.schema import (
BaseNode,
Document,
ImageDocument,
ImageNode,
NodeRelationship,
TextNode,
)
from llama_index.core.utils import truncate_text
logger = logging.getLogger(__name__)
@runtime_checkable
class IdFuncCallable(Protocol):
def __call__(self, i: int, doc: BaseNode) -> str:
...
def default_id_func(i: int, doc: BaseNode) -> str:
return str(uuid.uuid4())
def build_nodes_from_splits(
text_splits: List[str],
document: BaseNode,
ref_doc: Optional[BaseNode] = None,
id_func: Optional[IdFuncCallable] = None,
) -> List[TextNode]:
"""Build nodes from splits."""
ref_doc = ref_doc or document
id_func = id_func or default_id_func
nodes: List[TextNode] = []
"""Calling as_related_node_info() on a document recomputes the hash for the whole text and metadata"""
"""It is not that bad, when creating relationships between the nodes, but is terrible when adding a relationship"""
"""between the node and a document, hence we create the relationship only once here and pass it to the nodes"""
relationships = {NodeRelationship.SOURCE: ref_doc.as_related_node_info()}
for i, text_chunk in enumerate(text_splits):
logger.debug(f"> Adding chunk: {truncate_text(text_chunk, 50)}")
if isinstance(document, ImageDocument):
image_node = ImageNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
image=document.image,
image_path=document.image_path,
image_url=document.image_url,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(image_node) # type: ignore
elif isinstance(document, Document):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
elif isinstance(document, TextNode):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_seperator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
else:
raise ValueError(f"Unknown document type: {type(document)}")
return nodes
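# A minimal usage sketch (not part of the library source): given a Document and
# a pre-computed list of text chunks, build_nodes_from_splits returns TextNodes
# that all carry a SOURCE relationship back to the original document.
#
#     doc = Document(text="hello world. goodbye world.")
#     nodes = build_nodes_from_splits(["hello world.", "goodbye world."], doc)
#     assert all(NodeRelationship.SOURCE in node.relationships for node in nodes)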
|
"""General node utils."""
import logging
import uuid
from typing import List, Optional, Protocol, runtime_checkable
from llama_index.core.schema import (
BaseNode,
Document,
ImageDocument,
ImageNode,
NodeRelationship,
TextNode,
)
from llama_index.core.utils import truncate_text
logger = logging.getLogger(__name__)
@runtime_checkable
class IdFuncCallable(Protocol):
def __call__(self, i: int, doc: BaseNode) -> str:
...
def default_id_func(i: int, doc: BaseNode) -> str:
return str(uuid.uuid4())
def build_nodes_from_splits(
text_splits: List[str],
document: BaseNode,
ref_doc: Optional[BaseNode] = None,
id_func: Optional[IdFuncCallable] = None,
) -> List[TextNode]:
"""Build nodes from splits."""
ref_doc = ref_doc or document
id_func = id_func or default_id_func
nodes: List[TextNode] = []
"""Calling as_related_node_info() on a document recomputes the hash for the whole text and metadata"""
"""It is not that bad, when creating relationships between the nodes, but is terrible when adding a relationship"""
"""between the node and a document, hence we create the relationship only once here and pass it to the nodes"""
relationships = {NodeRelationship.SOURCE: ref_doc.as_related_node_info()}
for i, text_chunk in enumerate(text_splits):
logger.debug(f"> Adding chunk: {truncate_text(text_chunk, 50)}")
if isinstance(document, ImageDocument):
image_node = ImageNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
image=document.image,
image_path=document.image_path,
image_url=document.image_url,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(image_node) # type: ignore
elif isinstance(document, Document):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
elif isinstance(document, TextNode):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_seperator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
else:
raise ValueError(f"Unknown document type: {type(document)}")
return nodes
|
from workflows.types import StopEventT, RunResultT # noqa
|
from typing import Any, TypeVar, Union
from .events import StopEvent
StopEventT = TypeVar("StopEventT", bound=StopEvent)
# TODO: When releasing 1.0, remove support for Any
# and enforce usage of StopEventT
RunResultT = Union[StopEventT, Any]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import librosa
from jina import Flow, Document, DocumentArray
from ... import AudioCLIPEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
resp = f.post(on='test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, _ = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
doc = DocumentArray([Document(blob=x_audio)])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
assert responses[0].docs[0].embedding.shape == (1024, )
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import librosa
from jina import Flow, Document, DocumentArray
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
resp = f.post(on='test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, _ = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
doc = DocumentArray([Document(blob=x_audio)])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
assert responses[0].docs[0].embedding.shape == (1024, )
|
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Union[float, Dict[str, float]]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: Dict[str, float], name: str) -> Dict[str, float]:
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: Dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
        try:
            index = class_name.index("Evaluator")
            class_name = class_name[:index]
        except ValueError:
            # "Evaluator" does not occur in the class name; leave it unchanged.
            pass
        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
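# A minimal sketch (not part of the library) of how this base class is meant to
# be extended: a custom evaluator sets `primary_metric`, returns a dict of
# scores, and can reuse prefix_name_to_metrics to namespace its results.
#
#     class DummyAccuracyEvaluator(SentenceEvaluator):
#         def __init__(self, sentences, labels):
#             super().__init__()
#             self.sentences = sentences
#             self.labels = labels
#             self.primary_metric = "accuracy"
#
#         def __call__(self, model, output_path=None, epoch=-1, steps=-1):
#             embeddings = model.encode(self.sentences)  # placeholder scoring
#             accuracy = 1.0  # hypothetical metric computation
#             return self.prefix_name_to_metrics({"accuracy": accuracy}, "dummy")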
|
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Union[float, Dict[str, float]]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: Dict[str, float], name: str):
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: Dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
        try:
            index = class_name.index("Evaluator")
            class_name = class_name[:index]
        except ValueError:
            # "Evaluator" does not occur in the class name; leave it unchanged.
            pass
        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
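        # Run every sub-evaluator in turn: collect a single score from each (the
        # raw value for scalar results, else the primary metric or first entry of
        # the returned dict), merge all dicts into one result dict, and let
        # main_score_function produce the final "sequential_score".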
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
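        # Run every sub-evaluator in turn: collect a single score from each (the
        # raw value for scalar results, else the primary metric or first entry of
        # the returned dict), merge all dicts into one result dict, and let
        # main_score_function produce the final "sequential_score".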
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
                if hasattr(evaluator, "primary_metric"):
                    scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from typing import Any
from langchain_core.documents import Document
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
class AnyStr(str):
__slots__ = ()
def __eq__(self, other: object) -> bool:
return isinstance(other, str)
# The code below creates versions of pydantic models
# that will work in unit tests with AnyStr as the id field.
# Please note that the `id` field is assigned AFTER the model is created
# to work around an issue with pydantic ignoring the __eq__ method on
# subclassed strings.
def _any_id_document(**kwargs: Any) -> Document:
"""Create a document with an id field."""
message = Document(**kwargs)
message.id = AnyStr()
return message
def _any_id_ai_message(**kwargs: Any) -> AIMessage:
"""Create ai message with an any id field."""
message = AIMessage(**kwargs)
message.id = AnyStr()
return message
def _any_id_ai_message_chunk(**kwargs: Any) -> AIMessageChunk:
"""Create ai message with an any id field."""
message = AIMessageChunk(**kwargs)
message.id = AnyStr()
return message
def _any_id_human_message(**kwargs: Any) -> HumanMessage:
"""Create a human with an any id field."""
message = HumanMessage(**kwargs)
message.id = AnyStr()
return message
|
from typing import Any
from langchain_core.documents import Document
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
class AnyStr(str):
__slots__ = ()
def __eq__(self, other: Any) -> bool:
return isinstance(other, str)
# The code below creates versions of pydantic models
# that will work in unit tests with AnyStr as the id field.
# Please note that the `id` field is assigned AFTER the model is created
# to work around an issue with pydantic ignoring the __eq__ method on
# subclassed strings.
def _any_id_document(**kwargs: Any) -> Document:
"""Create a document with an id field."""
message = Document(**kwargs)
message.id = AnyStr()
return message
def _any_id_ai_message(**kwargs: Any) -> AIMessage:
"""Create ai message with an any id field."""
message = AIMessage(**kwargs)
message.id = AnyStr()
return message
def _any_id_ai_message_chunk(**kwargs: Any) -> AIMessageChunk:
"""Create ai message with an any id field."""
message = AIMessageChunk(**kwargs)
message.id = AnyStr()
return message
def _any_id_human_message(**kwargs: Any) -> HumanMessage:
"""Create a human with an any id field."""
message = HumanMessage(**kwargs)
message.id = AnyStr()
return message
|
from typing import TypeVar
from docarray.document.base_node import BaseNode
from .ndarray import Embedding, Tensor
from .url import ImageUrl
T = TypeVar('T')
__all__ = ['Tensor', 'Embedding', 'BaseNode']
|
from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING: # pragma: no cover
import scipy.sparse
import tensorflow
import torch
import numpy as np
from PIL.Image import Image as PILImage
from docarray import Document
ArrayType = TypeVar(
'ArrayType',
np.ndarray,
scipy.sparse.spmatrix,
tensorflow.SparseTensor,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
DocumentContentType = Union[bytes, str, ArrayType]
ProtoValueType = Optional[Union[str, bool, float]]
StructValueType = Union[
ProtoValueType, List[ProtoValueType], Dict[str, ProtoValueType]
]
DocumentArraySourceType = Union[
Sequence[Document], Document, Generator[Document], Iterable[Document]
]
T = TypeVar('T')
AnyDNN = TypeVar(
'AnyDNN'
) #: The type of any implementation of a Deep Neural Network object
DocumentArraySingletonIndexType = Union[int, str]
DocumentArrayMultipleIndexType = Union[
slice, Sequence[int], Sequence[str], Sequence[bool], Ellipsis
]
DocumentArraySingleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType], str
]
DocumentArrayMultipleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType],
Sequence[str],
]
DocumentArrayIndexType = Union[
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArraySingleAttributeType,
DocumentArrayMultipleAttributeType,
]
Image = TypeVar(
'Image',
str,
ForwardRef('np.ndarray'),
ForwardRef('PILImage'),
)
Text = TypeVar('Text', bound=str)
URI = TypeVar('URI', bound=str)
Audio = TypeVar('Audio', str, ForwardRef('np.ndarray'))
Video = TypeVar('Video', str, ForwardRef('np.ndarray'))
Mesh = TypeVar('Mesh', str, ForwardRef('np.ndarray'))
Tabular = TypeVar('Tabular', bound=str)
Blob = TypeVar('Blob', str, bytes)
JSON = TypeVar('JSON', str, dict)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from packaging.version import parse
# GitHub repository details
USER = "huggingface"
REPO = "diffusers"
def fetch_all_branches(user, repo):
branches = [] # List to store all branches
page = 1 # Start from first page
while True:
# Make a request to the GitHub API for the branches
response = requests.get(
f"https://api.github.com/repos/{user}/{repo}/branches",
params={"page": page},
timeout=60,
)
# Check if the request was successful
if response.status_code == 200:
# Add the branches from the current page to the list
branches.extend([branch["name"] for branch in response.json()])
# Check if there is a 'next' link for pagination
if "next" in response.links:
page += 1 # Move to the next page
else:
break # Exit loop if there is no next page
else:
print("Failed to retrieve branches:", response.status_code)
break
return branches
def main():
# Fetch all branches
branches = fetch_all_branches(USER, REPO)
# Filter branches.
# print(f"Total branches: {len(branches)}")
filtered_branches = []
for branch in branches:
if branch.startswith("v") and ("-release" in branch or "-patch" in branch):
filtered_branches.append(branch)
# print(f"Filtered: {branch}")
sorted_branches = sorted(filtered_branches, key=lambda x: parse(x.split("-")[0][1:]), reverse=True)
latest_branch = sorted_branches[0]
# print(f"Latest branch: {latest_branch}")
return latest_branch
if __name__ == "__main__":
print(main())
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from packaging.version import parse
# GitHub repository details
USER = "huggingface"
REPO = "diffusers"
def fetch_all_branches(user, repo):
branches = [] # List to store all branches
page = 1 # Start from first page
while True:
# Make a request to the GitHub API for the branches
response = requests.get(
f"https://api.github.com/repos/{user}/{repo}/branches",
params={"page": page},
timeout=60,
)
# Check if the request was successful
if response.status_code == 200:
# Add the branches from the current page to the list
branches.extend([branch["name"] for branch in response.json()])
# Check if there is a 'next' link for pagination
if "next" in response.links:
page += 1 # Move to the next page
else:
break # Exit loop if there is no next page
else:
print("Failed to retrieve branches:", response.status_code)
break
return branches
def main():
# Fetch all branches
branches = fetch_all_branches(USER, REPO)
# Filter branches.
# print(f"Total branches: {len(branches)}")
filtered_branches = []
for branch in branches:
if branch.startswith("v") and ("-release" in branch or "-patch" in branch):
filtered_branches.append(branch)
# print(f"Filtered: {branch}")
sorted_branches = sorted(filtered_branches, key=lambda x: parse(x.split("-")[0][1:]), reverse=True)
latest_branch = sorted_branches[0]
# print(f"Latest branch: {latest_branch}")
return latest_branch
if __name__ == "__main__":
print(main())
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from functools import update_wrapper, wraps
from types import MethodType
class _AvailableIfDescriptor:
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if check(self) returns a falsey value. Note that if check raises an error
this will also result in hasattr returning false.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, check, attribute_name):
self.fn = fn
self.check = check
self.attribute_name = attribute_name
# update the docstring of the descriptor
update_wrapper(self, fn)
def _check(self, obj, owner):
attr_err_msg = (
f"This {owner.__name__!r} has no attribute {self.attribute_name!r}"
)
try:
check_result = self.check(obj)
except Exception as e:
raise AttributeError(attr_err_msg) from e
if not check_result:
raise AttributeError(attr_err_msg)
def __get__(self, obj, owner=None):
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self._check(obj, owner=owner)
out = MethodType(self.fn, obj)
else:
# This makes it possible to use the decorated method as an unbound method,
# for instance when monkeypatching.
@wraps(self.fn)
def out(*args, **kwargs):
self._check(args[0], owner=owner)
return self.fn(*args, **kwargs)
return out
def available_if(check):
"""An attribute that is available only if check returns a truthy value.
Parameters
----------
check : callable
When passed the object with the decorated method, this should return
a truthy value if the attribute is available, and either return False
or raise an AttributeError if not available.
Returns
-------
callable
Callable makes the decorated method available if `check` returns
a truthy value, otherwise the decorated method is unavailable.
Examples
--------
>>> from sklearn.utils.metaestimators import available_if
>>> class HelloIfEven:
... def __init__(self, x):
... self.x = x
...
... def _x_is_even(self):
... return self.x % 2 == 0
...
... @available_if(_x_is_even)
... def say_hello(self):
... print("Hello")
...
>>> obj = HelloIfEven(1)
>>> hasattr(obj, "say_hello")
False
>>> obj.x = 2
>>> hasattr(obj, "say_hello")
True
>>> obj.say_hello()
Hello
"""
return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from functools import update_wrapper, wraps
from types import MethodType
class _AvailableIfDescriptor:
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if check(self) returns a falsey value. Note that if check raises an error
this will also result in hasattr returning false.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, check, attribute_name):
self.fn = fn
self.check = check
self.attribute_name = attribute_name
# update the docstring of the descriptor
update_wrapper(self, fn)
def _check(self, obj, owner):
attr_err_msg = (
f"This {repr(owner.__name__)} has no attribute {repr(self.attribute_name)}"
)
try:
check_result = self.check(obj)
except Exception as e:
raise AttributeError(attr_err_msg) from e
if not check_result:
raise AttributeError(attr_err_msg)
def __get__(self, obj, owner=None):
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self._check(obj, owner=owner)
out = MethodType(self.fn, obj)
else:
# This makes it possible to use the decorated method as an unbound method,
# for instance when monkeypatching.
@wraps(self.fn)
def out(*args, **kwargs):
self._check(args[0], owner=owner)
return self.fn(*args, **kwargs)
return out
def available_if(check):
"""An attribute that is available only if check returns a truthy value.
Parameters
----------
check : callable
When passed the object with the decorated method, this should return
a truthy value if the attribute is available, and either return False
or raise an AttributeError if not available.
Returns
-------
callable
Callable makes the decorated method available if `check` returns
a truthy value, otherwise the decorated method is unavailable.
Examples
--------
>>> from sklearn.utils.metaestimators import available_if
>>> class HelloIfEven:
... def __init__(self, x):
... self.x = x
...
... def _x_is_even(self):
... return self.x % 2 == 0
...
... @available_if(_x_is_even)
... def say_hello(self):
... print("Hello")
...
>>> obj = HelloIfEven(1)
>>> hasattr(obj, "say_hello")
False
>>> obj.x = 2
>>> hasattr(obj, "say_hello")
True
>>> obj.say_hello()
Hello
"""
return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)
|
from sentence_transformers import losses, SentenceTransformer, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
from sentence_transformers import losses, SentenceTransformer, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
:param model: SentenceTransformerModel
:param scale: Output of similarity function is multiplied by scale value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('bert-base-uncased')
train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=1.0),
InputExample(texts=['My third sentence', 'Unrelated sentence'], label=0.3)]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.AnglELoss(model=model)
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
import gc
import unittest
import pytest
import torch
from diffusers import (
StableDiffusionUpscalePipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
numpy_cosine_similarity_distance,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionUpscalePipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
repo_id = "stabilityai/stable-diffusion-x4-upscaler"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_format_inference_is_same_as_pretrained(self):
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png"
)
prompt = "a cat sitting on a park bench"
pipe = StableDiffusionUpscalePipeline.from_pretrained(self.repo_id)
pipe.enable_model_cpu_offload(device=torch_device)
generator = torch.Generator("cpu").manual_seed(0)
output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3)
image_from_pretrained = output.images[0]
pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(self.ckpt_path)
pipe_from_single_file.enable_model_cpu_offload(device=torch_device)
generator = torch.Generator("cpu").manual_seed(0)
output_from_single_file = pipe_from_single_file(
prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3
)
image_from_single_file = output_from_single_file.images[0]
assert image_from_pretrained.shape == (512, 512, 3)
assert image_from_single_file.shape == (512, 512, 3)
assert (
numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten()) < 1e-3
)
@pytest.mark.xfail(
condition=True,
reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.",
strict=True,
)
def test_single_file_components_with_original_config(self):
super().test_single_file_components_with_original_config()
@pytest.mark.xfail(
condition=True,
reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.",
strict=True,
)
def test_single_file_components_with_original_config_local_files_only(self):
super().test_single_file_components_with_original_config_local_files_only()
|
import gc
import unittest
import pytest
import torch
from diffusers import (
StableDiffusionUpscalePipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
numpy_cosine_similarity_distance,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionUpscalePipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
repo_id = "stabilityai/stable-diffusion-x4-upscaler"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_format_inference_is_same_as_pretrained(self):
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png"
)
prompt = "a cat sitting on a park bench"
pipe = StableDiffusionUpscalePipeline.from_pretrained(self.repo_id)
pipe.enable_model_cpu_offload()
generator = torch.Generator("cpu").manual_seed(0)
output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3)
image_from_pretrained = output.images[0]
pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(self.ckpt_path)
pipe_from_single_file.enable_model_cpu_offload()
generator = torch.Generator("cpu").manual_seed(0)
output_from_single_file = pipe_from_single_file(
prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3
)
image_from_single_file = output_from_single_file.images[0]
assert image_from_pretrained.shape == (512, 512, 3)
assert image_from_single_file.shape == (512, 512, 3)
assert (
numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten()) < 1e-3
)
@pytest.mark.xfail(
condition=True,
reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.",
strict=True,
)
def test_single_file_components_with_original_config(self):
super().test_single_file_components_with_original_config()
@pytest.mark.xfail(
condition=True,
reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.",
strict=True,
)
def test_single_file_components_with_original_config_local_files_only(self):
super().test_single_file_components_with_original_config_local_files_only()
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then k-mean clustering is applied.
"""
from sklearn.cluster import KMeans
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Perform kmean clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then k-mean clustering is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Perform kmean clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.callbacks.usage import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
__all__ = (
"AsyncCallbackHandler",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"AsyncCallbackManagerForChainRun",
"AsyncCallbackManagerForLLMRun",
"AsyncCallbackManagerForRetrieverRun",
"AsyncCallbackManagerForToolRun",
"AsyncParentRunManager",
"AsyncRunManager",
"BaseCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"CallbackManager",
"CallbackManagerForChainGroup",
"CallbackManagerForChainRun",
"CallbackManagerForLLMRun",
"CallbackManagerForRetrieverRun",
"CallbackManagerForToolRun",
"CallbackManagerMixin",
"Callbacks",
"ChainManagerMixin",
"FileCallbackHandler",
"LLMManagerMixin",
"ParentRunManager",
"RetrieverManagerMixin",
"RunManager",
"RunManagerMixin",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"ToolManagerMixin",
"UsageMetadataCallbackHandler",
"adispatch_custom_event",
"dispatch_custom_event",
"get_usage_metadata_callback",
)
_dynamic_imports = {
"AsyncCallbackHandler": "base",
"BaseCallbackHandler": "base",
"BaseCallbackManager": "base",
"CallbackManagerMixin": "base",
"Callbacks": "base",
"ChainManagerMixin": "base",
"LLMManagerMixin": "base",
"RetrieverManagerMixin": "base",
"RunManagerMixin": "base",
"ToolManagerMixin": "base",
"FileCallbackHandler": "file",
"AsyncCallbackManager": "manager",
"AsyncCallbackManagerForChainGroup": "manager",
"AsyncCallbackManagerForChainRun": "manager",
"AsyncCallbackManagerForLLMRun": "manager",
"AsyncCallbackManagerForRetrieverRun": "manager",
"AsyncCallbackManagerForToolRun": "manager",
"AsyncParentRunManager": "manager",
"AsyncRunManager": "manager",
"BaseRunManager": "manager",
"CallbackManager": "manager",
"CallbackManagerForChainGroup": "manager",
"CallbackManagerForChainRun": "manager",
"CallbackManagerForLLMRun": "manager",
"CallbackManagerForRetrieverRun": "manager",
"CallbackManagerForToolRun": "manager",
"ParentRunManager": "manager",
"RunManager": "manager",
"adispatch_custom_event": "manager",
"dispatch_custom_event": "manager",
"StdOutCallbackHandler": "stdout",
"StreamingStdOutCallbackHandler": "streaming_stdout",
"UsageMetadataCallbackHandler": "usage",
"get_usage_metadata_callback": "usage",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
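# Minimal usage sketch (assumption: langchain_core is installed). Attribute access on this
# package goes through the PEP 562 module-level __getattr__ defined above: the attribute is
# imported from its submodule on first use and cached in the module globals, so the heavy
# callback classes are not imported eagerly. Kept as comments so this module stays lazy:
#
#     from langchain_core import callbacks
#
#     handler = callbacks.StdOutCallbackHandler()              # resolved lazily via __getattr__
#     manager = callbacks.CallbackManager(handlers=[handler])  # now cached in globals()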
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.callbacks.usage import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
__all__ = (
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"UsageMetadataCallbackHandler",
"get_usage_metadata_callback",
)
_dynamic_imports = {
"AsyncCallbackHandler": "base",
"BaseCallbackHandler": "base",
"BaseCallbackManager": "base",
"CallbackManagerMixin": "base",
"Callbacks": "base",
"ChainManagerMixin": "base",
"LLMManagerMixin": "base",
"RetrieverManagerMixin": "base",
"RunManagerMixin": "base",
"ToolManagerMixin": "base",
"FileCallbackHandler": "file",
"AsyncCallbackManager": "manager",
"AsyncCallbackManagerForChainGroup": "manager",
"AsyncCallbackManagerForChainRun": "manager",
"AsyncCallbackManagerForLLMRun": "manager",
"AsyncCallbackManagerForRetrieverRun": "manager",
"AsyncCallbackManagerForToolRun": "manager",
"AsyncParentRunManager": "manager",
"AsyncRunManager": "manager",
"BaseRunManager": "manager",
"CallbackManager": "manager",
"CallbackManagerForChainGroup": "manager",
"CallbackManagerForChainRun": "manager",
"CallbackManagerForLLMRun": "manager",
"CallbackManagerForRetrieverRun": "manager",
"CallbackManagerForToolRun": "manager",
"ParentRunManager": "manager",
"RunManager": "manager",
"adispatch_custom_event": "manager",
"dispatch_custom_event": "manager",
"StdOutCallbackHandler": "stdout",
"StreamingStdOutCallbackHandler": "streaming_stdout",
"UsageMetadataCallbackHandler": "usage",
"get_usage_metadata_callback": "usage",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from docarray.document.any_document import AnyDocument
from docarray.document.base_node import BaseNode
from docarray.document.document import BaseDocument
__all__ = ['AnyDocument', 'BaseDocument', 'BaseNode']
|
from docarray.document.any_document import AnyDocument
from docarray.document.document import BaseDocument
__all__ = ['AnyDocument', 'BaseDocument']
|
import torchaudio
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
torchaudio._extension._init_ffmpeg()
from . import _stream_reader
item = getattr(_stream_reader, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_ffmpeg")
import torchaudio._torchaudio_ffmpeg
except OSError as err:
raise ImportError(
"Stream API requires FFmpeg libraries (libavformat and such). Please install FFmpeg 4."
) from err
try:
torch.ops.torchaudio.ffmpeg_init()
except RuntimeError as err:
raise RuntimeError(
"Stream API requires FFmpeg binding. Please set USE_FFMPEG=1 when building from source."
) from err
global _INITIALIZED
_INITIALIZED = True
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not _INITIALIZED:
_init_extension()
from . import _stream_reader
item = getattr(_stream_reader, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
"""LlamaPack class."""
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
# backwards compatibility
try:
from llama_index.agent.legacy.openai_agent import OpenAIAgent
except ImportError:
from llama_index.agent.openai import OpenAIAgent
class GmailOpenAIAgentPack(BaseLlamaPack):
def __init__(self, gmail_tool_kwargs: Dict[str, Any]) -> None:
"""Init params."""
try:
from llama_index.tools.google import GmailToolSpec
except ImportError:
raise ImportError("llama_hub not installed.")
self.tool_spec = GmailToolSpec(**gmail_tool_kwargs)
self.agent = OpenAIAgent.from_tools(self.tool_spec.to_tool_list())
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {"gmail_tool": self.tool_spec, "agent": self.agent}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
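# Minimal usage sketch (assumptions: the llama-index Google tool spec and OpenAI agent
# packages are installed and an OpenAI API key is configured). Kept as comments because
# running it calls external services:
#
#     pack = GmailOpenAIAgentPack(gmail_tool_kwargs={})
#     print(pack.get_modules())                         # {"gmail_tool": ..., "agent": ...}
#     response = pack.run("Summarize my most recent email.")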
|
"""LlamaPack class."""
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
# backwards compatibility
try:
from llama_index.agent.legacy.openai_agent import OpenAIAgent
except ImportError:
from llama_index.agent.openai import OpenAIAgent
class GmailOpenAIAgentPack(BaseLlamaPack):
def __init__(self, gmail_tool_kwargs: Dict[str, Any]) -> None:
"""Init params."""
try:
from llama_index.tools.google import GmailToolSpec
except ImportError:
raise ImportError("llama_hub not installed.")
self.tool_spec = GmailToolSpec(**gmail_tool_kwargs)
self.agent = OpenAIAgent.from_tools(self.tool_spec.to_tool_list())
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {"gmail_tool": self.tool_spec, "agent": self.agent}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
|
from __future__ import annotations
import functools
import operator
from typing import Any, TYPE_CHECKING
import torch
# NOTE: other files rely on the imports below
from torch._dynamo import callback as compilation_callback # noqa: F401
from torch._inductor.runtime.cache_dir_utils import ( # noqa: F401
cache_dir,
default_cache_dir,
triton_cache_dir,
)
if TYPE_CHECKING:
from collections.abc import Hashable
from .triton_compat import Config
def conditional_product(*args: int) -> int:
return functools.reduce(operator.mul, [x for x in args if x])
def ceildiv(numer: int, denom: int) -> int:
return -(numer // -denom)
def is_power_of_2(n: int) -> bool:
"""Returns whether n = 2 ** m for some integer m."""
return n > 0 and n & n - 1 == 0
def next_power_of_2(n: int) -> int:
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n |= n >> 32
n += 1
return n
def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int:
"""
    Return the total number of bytes that the tensor-type arguments take.
    For in/out args, tensor sizes are counted twice: once for reading and
    once for writing.
    The first num_in_out_args arguments are in/out tensors.
"""
return sum(
arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args))
for i, arg in enumerate(args)
if isinstance(arg, torch.Tensor)
)
def triton_config_to_hashable(cfg: Config) -> Hashable:
"""
Convert triton config to a tuple that can uniquely identify it. We can use
the return value as a dictionary key.
"""
items = sorted(cfg.kwargs.items())
items.append(("num_warps", cfg.num_warps))
items.append(("num_stages", cfg.num_stages))
return tuple(items)
def validate_triton_config(cfg: Config) -> None:
# [Note: Triton pre_hook in inductor]
# pre-hook is a lambda function, which we don't attempt to serialize.
# right now, if a pre-hook is attached to the config, it will not be saved;
# and then it won't be used when the config is loaded from cache.
# So we assert - if we do get a pre_hook, it might get ignored after caching.
assert getattr(cfg, "pre_hook", None) is None, (
"triton configs with pre_hooks not supported"
)
def create_bandwidth_info_str(
ms: float,
num_gb: float,
gb_per_s: float,
prefix: str = "",
suffix: str = "",
color: bool = True,
) -> str:
info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}"
slow = ms > 0.012 and gb_per_s < 650
return red_text(info_str) if color and slow else info_str
def get_max_y_grid() -> int:
return 65535
try:
import colorama
HAS_COLORAMA = True
except ModuleNotFoundError:
HAS_COLORAMA = False
colorama = None # type: ignore[assignment]
if HAS_COLORAMA:
def _color_text(msg: str, color: str) -> str:
return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET
else:
def _color_text(msg: str, color: str) -> str:
return msg
def green_text(msg: str) -> str:
return _color_text(msg, "green")
def yellow_text(msg: str) -> str:
return _color_text(msg, "yellow")
def red_text(msg: str) -> str:
return _color_text(msg, "red")
def blue_text(msg: str) -> str:
return _color_text(msg, "blue")
def get_first_attr(obj: Any, *attrs: str) -> Any:
"""
Return the first available attribute or throw an exception if none is present.
"""
for attr in attrs:
if hasattr(obj, attr):
return getattr(obj, attr)
raise AssertionError(f"{obj} does not has any of the attributes: {attrs}")
dynamo_timed = torch._dynamo.utils.dynamo_timed # type: ignore[has-type]
def triton_hash_to_path_key(key: str) -> str:
# In early versions of Triton, the hash is directly used in the path name.
# Later, the hash is converted to base64 before being used in the path name.
    # Later, the base64 conversion was replaced with base32.
#
# This code tries to import _base64 and falls back to _base32 if _base64 is unavailable.
#
# To handle this, try to import the to-base64-conversion function.
# If it exists, use it; otherwise, try using _base32; if both are unavailable, use the hash directly.
try:
from triton.runtime.cache import _base64
return _base64(key)
except Exception:
try:
from triton.runtime.cache import _base32
return _base32(key)
except Exception:
return key
def compile_mps_shader(source: str) -> Any:
"""
    Compile the shader source, raising a more actionable error message when compilation fails.
"""
try:
return torch.mps.compile_shader(source)
except SyntaxError as err:
raise SyntaxError(f"failed to compile {source} with {err.msg}") from err
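# A minimal self-check sketch (no assumptions beyond this module): exercises the small
# integer helpers defined above when the file is run directly.
if __name__ == "__main__":
    assert ceildiv(10, 3) == 4  # rounds up
    assert next_power_of_2(17) == 32  # smallest power of 2 >= n
    assert is_power_of_2(32) and not is_power_of_2(12)
    assert conditional_product(2, 0, 3) == 6  # falsy factors are skipped
    print(create_bandwidth_info_str(0.5, 1.0, 2.0, prefix="demo "))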
|
from __future__ import annotations
import functools
import operator
from typing import Any, TYPE_CHECKING
import torch
# NOTE: other files rely on the imports below
from torch._dynamo import callback as compilation_callback # noqa: F401
from torch._inductor.runtime.cache_dir_utils import ( # noqa: F401
cache_dir,
default_cache_dir,
triton_cache_dir,
)
if TYPE_CHECKING:
from collections.abc import Hashable
from .triton_compat import Config
def conditional_product(*args: int) -> int:
return functools.reduce(operator.mul, [x for x in args if x])
def ceildiv(number: int, denom: int) -> int:
return -(number // -denom)
def is_power_of_2(n: int) -> bool:
"""Returns whether n = 2 ** m for some integer m."""
return n > 0 and n & n - 1 == 0
def next_power_of_2(n: int) -> int:
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n |= n >> 32
n += 1
return n
def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int:
"""
    Return the total number of bytes that the tensor-type arguments take.
    For in/out args, tensor sizes are counted twice: once for reading and
    once for writing.
    The first num_in_out_args arguments are in/out tensors.
"""
return sum(
arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args))
for i, arg in enumerate(args)
if isinstance(arg, torch.Tensor)
)
def triton_config_to_hashable(cfg: Config) -> Hashable:
"""
Convert triton config to a tuple that can uniquely identify it. We can use
the return value as a dictionary key.
"""
items = sorted(cfg.kwargs.items())
items.append(("num_warps", cfg.num_warps))
items.append(("num_stages", cfg.num_stages))
return tuple(items)
def validate_triton_config(cfg: Config) -> None:
# [Note: Triton pre_hook in inductor]
# pre-hook is a lambda function, which we don't attempt to serialize.
# right now, if a pre-hook is attached to the config, it will not be saved;
# and then it won't be used when the config is loaded from cache.
# So we assert - if we do get a pre_hook, it might get ignored after caching.
assert getattr(cfg, "pre_hook", None) is None, (
"triton configs with pre_hooks not supported"
)
def create_bandwidth_info_str(
ms: float,
num_gb: float,
gb_per_s: float,
prefix: str = "",
suffix: str = "",
color: bool = True,
) -> str:
info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}"
slow = ms > 0.012 and gb_per_s < 650
return red_text(info_str) if color and slow else info_str
def get_max_y_grid() -> int:
return 65535
try:
import colorama
HAS_COLORAMA = True
except ModuleNotFoundError:
HAS_COLORAMA = False
colorama = None # type: ignore[assignment]
if HAS_COLORAMA:
def _color_text(msg: str, color: str) -> str:
return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET
else:
def _color_text(msg: str, color: str) -> str:
return msg
def green_text(msg: str) -> str:
return _color_text(msg, "green")
def yellow_text(msg: str) -> str:
return _color_text(msg, "yellow")
def red_text(msg: str) -> str:
return _color_text(msg, "red")
def blue_text(msg: str) -> str:
return _color_text(msg, "blue")
def get_first_attr(obj: Any, *attrs: str) -> Any:
"""
Return the first available attribute or throw an exception if none is present.
"""
for attr in attrs:
if hasattr(obj, attr):
return getattr(obj, attr)
raise AssertionError(f"{obj} does not has any of the attributes: {attrs}")
dynamo_timed = torch._dynamo.utils.dynamo_timed # type: ignore[has-type]
def triton_hash_to_path_key(key: str) -> str:
# In early versions of Triton, the hash is directly used in the path name.
# Later, the hash is converted to base64 before being used in the path name.
    # Later, the base64 conversion was replaced with base32.
#
# This code tries to import _base64 and falls back to _base32 if _base64 is unavailable.
#
# To handle this, try to import the to-base64-conversion function.
# If it exists, use it; otherwise, try using _base32; if both are unavailable, use the hash directly.
try:
from triton.runtime.cache import _base64
return _base64(key)
except Exception:
try:
from triton.runtime.cache import _base32
return _base32(key)
except Exception:
return key
def compile_mps_shader(source: str) -> Any:
"""
    Compile the shader source, raising a more actionable error message when compilation fails.
"""
try:
return torch.mps.compile_shader(source)
except SyntaxError as err:
raise SyntaxError(f"failed to compile {source} with {err.msg}") from err
|
from typing import List, Optional
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.storage.index_store.types import BaseIndexStore
from llama_index.core.storage.index_store.utils import (
index_struct_to_json,
json_to_index_struct,
)
from llama_index.core.storage.kvstore.types import BaseKVStore
DEFAULT_NAMESPACE = "index_store"
DEFAULT_COLLECTION_SUFFIX = "/data"
class KVIndexStore(BaseIndexStore):
"""
Key-Value Index store.
Args:
kvstore (BaseKVStore): key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the collection name
"""
def __init__(
self,
kvstore: BaseKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a KVIndexStore."""
self._kvstore = kvstore
self._namespace = namespace or DEFAULT_NAMESPACE
self._collection_suffix = collection_suffix or DEFAULT_COLLECTION_SUFFIX
self._collection = f"{self._namespace}{self._collection_suffix}"
def add_index_struct(self, index_struct: IndexStruct) -> None:
"""
Add an index struct.
Args:
index_struct (IndexStruct): index struct
"""
key = index_struct.index_id
data = index_struct_to_json(index_struct)
self._kvstore.put(key, data, collection=self._collection)
def delete_index_struct(self, key: str) -> None:
"""
Delete an index struct.
Args:
key (str): index struct key
"""
self._kvstore.delete(key, collection=self._collection)
def get_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
"""
Get an index struct.
Args:
struct_id (Optional[str]): index struct id
"""
if struct_id is None:
structs = self.index_structs()
assert len(structs) == 1
return structs[0]
else:
json = self._kvstore.get(struct_id, collection=self._collection)
if json is None:
return None
return json_to_index_struct(json)
def index_structs(self) -> List[IndexStruct]:
"""
Get all index structs.
Returns:
List[IndexStruct]: index structs
"""
jsons = self._kvstore.get_all(collection=self._collection)
return [json_to_index_struct(json) for json in jsons.values()]
async def async_add_index_struct(self, index_struct: IndexStruct) -> None:
"""
Asynchronously add an index struct.
Args:
index_struct (IndexStruct): index struct
"""
key = index_struct.index_id
data = index_struct_to_json(index_struct)
await self._kvstore.aput(key, data, collection=self._collection)
async def adelete_index_struct(self, key: str) -> None:
"""
Asynchronously delete an index struct.
Args:
key (str): index struct key
"""
await self._kvstore.adelete(key, collection=self._collection)
async def aget_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
"""
Asynchronously get an index struct.
Args:
struct_id (Optional[str]): index struct id
"""
if struct_id is None:
structs = await self.async_index_structs()
assert len(structs) == 1
return structs[0]
else:
json = await self._kvstore.aget(struct_id, collection=self._collection)
if json is None:
return None
return json_to_index_struct(json)
async def async_index_structs(self) -> List[IndexStruct]:
"""
Asynchronously get all index structs.
Returns:
List[IndexStruct]: index structs
"""
jsons = await self._kvstore.aget_all(collection=self._collection)
return [json_to_index_struct(json) for json in jsons.values()]
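# Minimal usage sketch (assumptions: SimpleKVStore is importable from
# llama_index.core.storage.kvstore as an in-memory BaseKVStore, and `some_index_struct` is
# a placeholder for an existing IndexStruct). Structs are stored under the
# "<namespace><collection_suffix>" collection, e.g. "my_indices/data" below.
#
#     from llama_index.core.storage.kvstore import SimpleKVStore
#
#     index_store = KVIndexStore(SimpleKVStore(), namespace="my_indices")
#     index_store.add_index_struct(some_index_struct)
#     assert index_store.get_index_struct(some_index_struct.index_id) is not None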
|
from typing import List, Optional
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.storage.index_store.types import BaseIndexStore
from llama_index.core.storage.index_store.utils import (
index_struct_to_json,
json_to_index_struct,
)
from llama_index.core.storage.kvstore.types import BaseKVStore
DEFAULT_NAMESPACE = "index_store"
DEFAULT_COLLECTION_SUFFIX = "/data"
class KVIndexStore(BaseIndexStore):
"""
Key-Value Index store.
Args:
kvstore (BaseKVStore): key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the collection name
"""
def __init__(
self,
kvstore: BaseKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a KVIndexStore."""
self._kvstore = kvstore
self._namespace = namespace or DEFAULT_NAMESPACE
self._collection_suffix = collection_suffix or DEFAULT_COLLECTION_SUFFIX
self._collection = f"{self._namespace}{self._collection_suffix}"
def add_index_struct(self, index_struct: IndexStruct) -> None:
"""
Add an index struct.
Args:
index_struct (IndexStruct): index struct
"""
key = index_struct.index_id
data = index_struct_to_json(index_struct)
self._kvstore.put(key, data, collection=self._collection)
def delete_index_struct(self, key: str) -> None:
"""
Delete an index struct.
Args:
key (str): index struct key
"""
self._kvstore.delete(key, collection=self._collection)
def get_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
"""
Get an index struct.
Args:
struct_id (Optional[str]): index struct id
"""
if struct_id is None:
structs = self.index_structs()
assert len(structs) == 1
return structs[0]
else:
json = self._kvstore.get(struct_id, collection=self._collection)
if json is None:
return None
return json_to_index_struct(json)
def index_structs(self) -> List[IndexStruct]:
"""
Get all index structs.
Returns:
List[IndexStruct]: index structs
"""
jsons = self._kvstore.get_all(collection=self._collection)
return [json_to_index_struct(json) for json in jsons.values()]
|
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[512]))
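# A minimal inspection sketch (assumptions: mmengine/mmdet are installed and this file is
# saved under a hypothetical path such as configs/detr/detr_r18_8xb2-500e_coco.py). Fields
# that are not overridden here, e.g. the transformer head, are inherited from the _base_
# config via mmengine's config merging:
#
#     from mmengine.config import Config
#
#     cfg = Config.fromfile('configs/detr/detr_r18_8xb2-500e_coco.py')
#     print(cfg.model.backbone.depth)  # 18, merged on top of the ResNet-50 base config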
|
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
bbox_head=dict(in_channels=512))
|
_base_ = './detr_r50_8xb2-150e_coco.py'
# learning policy
max_epochs = 500
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=10)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[334],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
|
_base_ = './detr_r50_8x2_150e_coco.py'
# learning policy
max_epochs = 500
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=10)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[334],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
import os
from pathlib import Path
from torchaudio.datasets.libritts import LIBRITTS
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_UTTERANCE_IDS = [
[19, 198, "000000", "000000"],
[26, 495, "000004", "000000"],
]
_ORIGINAL_TEXT = "this is the original text."
_NORMALIZED_TEXT = "this is the normalized text."
def get_mock_dataset(root_dir):
"""
    root_dir: directory of the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LibriTTS", "train-clean-100")
for i, utterance_id in enumerate(_UTTERANCE_IDS):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 24000)
mocked_data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, "w") as file_:
file_.write(_ORIGINAL_TEXT)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, "w") as file_:
file_.write(_NORMALIZED_TEXT)
return mocked_data, _UTTERANCE_IDS, _ORIGINAL_TEXT, _NORMALIZED_TEXT
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
_utterance_ids, _original_text, _normalized_text = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._utterance_ids, cls._original_text, cls._normalized_text = get_mock_dataset(cls.root_dir)
def _test_libritts(self, dataset):
n_ites = 0
for i, (
waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id,
) in enumerate(dataset):
expected_ids = self._utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self._original_text
assert normalized_text == self._normalized_text
assert utterance_id == f'{"_".join(str(u) for u in expected_ids[-4:])}'
n_ites += 1
assert n_ites == len(self._utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets.libritts import LIBRITTS
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
_UTTERANCE_IDS = [
[19, 198, "000000", "000000"],
[26, 495, "000004", "000000"],
]
_ORIGINAL_TEXT = "this is the original text."
_NORMALIZED_TEXT = "this is the normalized text."
def get_mock_dataset(root_dir):
"""
    root_dir: directory of the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LibriTTS", "train-clean-100")
for i, utterance_id in enumerate(_UTTERANCE_IDS):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 24000)
mocked_data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, "w") as file_:
file_.write(_ORIGINAL_TEXT)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, "w") as file_:
file_.write(_NORMALIZED_TEXT)
return mocked_data, _UTTERANCE_IDS, _ORIGINAL_TEXT, _NORMALIZED_TEXT
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
_utterance_ids, _original_text, _normalized_text = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._utterance_ids, cls._original_text, cls._normalized_text = get_mock_dataset(cls.root_dir)
def _test_libritts(self, dataset):
n_ites = 0
for i, (
waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id,
) in enumerate(dataset):
expected_ids = self._utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self._original_text
assert normalized_text == self._normalized_text
assert utterance_id == f'{"_".join(str(u) for u in expected_ids[-4:])}'
n_ites += 1
assert n_ites == len(self._utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
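# A minimal follow-up sketch (assumption: the training run above completed and wrote the
# final model to `final_output_dir`). The saved model can be reloaded for inference as-is.
loaded_model = SentenceTransformer(final_output_dir)
sanity_embeddings = loaded_model.encode(["A quick sanity-check sentence."])
print(sanity_embeddings.shape)  # (1, embedding_dim)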
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.TIMMBackbone',
model_name='tv_resnet50', # ResNet-50 with torchvision weights
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='tv_resnet50', # ResNet-50 with torchvision weights
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
import numpy as np
from PIL import Image
from .utils import download_url
from .vision import VisionDataset
class USPS(VisionDataset):
"""`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset.
    The data format is: [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``.
The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]``
and make pixel values in ``[0, 255]``.
Args:
        root (str or ``pathlib.Path``): Root directory of dataset to store ``USPS`` data files.
train (bool, optional): If True, creates dataset from ``usps.bz2``,
otherwise from ``usps.t.bz2``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
"usps.bz2",
"ec16c51db3855ca6c91edd34d0e9b197",
],
"test": [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2",
"usps.t.bz2",
"8ea070ee2aca1ac39742fdd1ef5ed118",
],
}
def __init__(
self,
root: Union[str, Path],
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
split = "train" if train else "test"
url, filename, checksum = self.split_list[split]
full_path = os.path.join(self.root, filename)
if download and not os.path.exists(full_path):
download_url(url, self.root, filename, md5=checksum)
import bz2
with bz2.open(full_path) as fp:
raw_data = [line.decode().split() for line in fp.readlines()]
tmp_list = [[x.split(":")[-1] for x in data[1:]] for data in raw_data]
imgs = np.asarray(tmp_list, dtype=np.float32).reshape((-1, 16, 16))
imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
targets = [int(d[0]) - 1 for d in raw_data]
self.data = imgs
self.targets = targets
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
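# Minimal usage sketch (assumptions: network access for the one-time download; torchvision
# transforms are available). Each item is a (PIL.Image, int) pair. Kept as comments because
# it downloads data:
#
#     from torch.utils.data import DataLoader
#     from torchvision import transforms
#
#     usps_train = USPS(root="data/usps", train=True, download=True, transform=transforms.ToTensor())
#     loader = DataLoader(usps_train, batch_size=32, shuffle=True)
#     images, targets = next(iter(loader))  # images: [32, 1, 16, 16], targets: class indices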
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import download_url
from .vision import VisionDataset
class USPS(VisionDataset):
"""`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset.
    The data-format is: [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``.
The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]``
and make pixel values in ``[0, 255]``.
Args:
        root (str or ``pathlib.Path``): Root directory of dataset to store ``USPS`` data files.
train (bool, optional): If True, creates dataset from ``usps.bz2``,
otherwise from ``usps.t.bz2``.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
"usps.bz2",
"ec16c51db3855ca6c91edd34d0e9b197",
],
"test": [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2",
"usps.t.bz2",
"8ea070ee2aca1ac39742fdd1ef5ed118",
],
}
def __init__(
self,
root: Union[str, Path],
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
split = "train" if train else "test"
url, filename, checksum = self.split_list[split]
full_path = os.path.join(self.root, filename)
if download and not os.path.exists(full_path):
download_url(url, self.root, filename, md5=checksum)
import bz2
with bz2.open(full_path) as fp:
raw_data = [line.decode().split() for line in fp.readlines()]
tmp_list = [[x.split(":")[-1] for x in data[1:]] for data in raw_data]
imgs = np.asarray(tmp_list, dtype=np.float32).reshape((-1, 16, 16))
imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
targets = [int(d[0]) - 1 for d in raw_data]
self.data = imgs
self.targets = targets
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
|
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array import DocVec
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image]([Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)])
return batch.to_doc_vec()
@pytest.mark.proto
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
@pytest.mark.proto
def test_proto_stacked_mode_numpy():
class MyDoc(BaseDoc):
tensor: NdArray[3, 224, 224]
da = DocList[MyDoc]([MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)])
da = da.to_doc_vec()
da.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDoc):
image: NdArray
da = DocList[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).to_doc_vec()
da2 = DocVec.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocVec)
|
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array import DocVec
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image]([Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)])
return batch.stack()
@pytest.mark.proto
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
@pytest.mark.proto
def test_proto_stacked_mode_numpy():
class MyDoc(BaseDoc):
tensor: NdArray[3, 224, 224]
da = DocList[MyDoc]([MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)])
da = da.stack()
da.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDoc):
image: NdArray
da = DocList[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocVec.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocVec)
|
_base_ = './solov2_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')),
mask_head=dict(
mask_feature_head=dict(conv_cfg=dict(type='DCNv2')),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=True))
|
_base_ = 'solov2_r50_fpn_mstrain_3x_coco.py'
# model settings
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')),
mask_head=dict(
mask_feature_head=dict(conv_cfg=dict(type='DCNv2')),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=True))
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not an SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
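# Example invocation (illustrative; the script filename is an assumption, and the input
# checkpoint must be an old-style MMDetection SSD checkpoint):
#
#   python upgrade_ssd_version.py old_ssd300.pth new_ssd300.pth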
|
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not an SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import Dict, List, Optional, Tuple, Union
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData, PixelData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
PixelList = List[PixelData]
OptPixelList = Optional[PixelList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],
Tuple[torch.Tensor], torch.Tensor]
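# Illustrative sketch (not part of the upstream module) of how these aliases are
# typically consumed in detector/head signatures; the function name below is
# hypothetical, not an MMDetection API.
#
#   def loss_by_feat(cls_scores: List[torch.Tensor],
#                    batch_gt_instances: InstanceList,
#                    batch_img_metas: List[dict],
#                    batch_gt_instances_ignore: OptInstanceList = None,
#                    train_cfg: OptConfigType = None) -> Dict[str, torch.Tensor]:
#       ...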
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import Dict, List, Optional, Tuple, Union
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],
Tuple[torch.Tensor], torch.Tensor]
|
import os
from typing import Literal, Optional, overload
import nomic # type: ignore[import]
from langchain_core.embeddings import Embeddings
from nomic import embed
class NomicEmbeddings(Embeddings):
"""NomicEmbeddings embedding model.
Example:
.. code-block:: python
from langchain_nomic import NomicEmbeddings
model = NomicEmbeddings()
"""
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["remote"] = ...,
):
...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["local", "dynamic"],
device: Optional[str] = ...,
):
...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: str,
device: Optional[str] = ...,
):
...
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = None,
dimensionality: Optional[int] = None,
inference_mode: str = "remote",
device: Optional[str] = None,
vision_model: Optional[str] = None,
):
"""Initialize NomicEmbeddings model.
Args:
model: model name
nomic_api_key: optionally, set the Nomic API key. Uses the ``NOMIC_API_KEY``
environment variable by default.
dimensionality: The embedding dimension, for use with Matryoshka-capable
models. Defaults to full-size.
inference_mode: How to generate embeddings. One of ``'remote'``, ``'local'``
(Embed4All), or ``'dynamic'`` (automatic). Defaults to ``'remote'``.
device: The device to use for local embeddings. Choices include
``'cpu'``, ``'gpu'``, ``'nvidia'``, ``'amd'``, or a specific device
name. See the docstring for ``GPT4All.__init__`` for more info.
Typically defaults to ``'cpu'``. Do not use on macOS.
"""
_api_key = nomic_api_key or os.environ.get("NOMIC_API_KEY")
if _api_key:
nomic.login(_api_key)
self.model = model
self.dimensionality = dimensionality
self.inference_mode = inference_mode
self.device = device
self.vision_model = vision_model
def embed(self, texts: list[str], *, task_type: str) -> list[list[float]]:
"""Embed texts.
Args:
texts: list of texts to embed
task_type: the task type to use when embedding. One of ``'search_query'``,
``'search_document'``, ``'classification'``, ``'clustering'``
"""
output = embed.text(
texts=texts,
model=self.model,
task_type=task_type,
dimensionality=self.dimensionality,
inference_mode=self.inference_mode,
device=self.device,
)
return output["embeddings"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs.
Args:
texts: list of texts to embed as documents
"""
return self.embed(
texts=texts,
task_type="search_document",
)
def embed_query(self, text: str) -> list[float]:
"""Embed query text.
Args:
text: query text
"""
return self.embed(
texts=[text],
task_type="search_query",
)[0]
def embed_image(self, uris: list[str]) -> list[list[float]]:
return embed.image(
images=uris,
model=self.vision_model,
)["embeddings"]
|
import os
from typing import Literal, Optional, overload
import nomic # type: ignore[import]
from langchain_core.embeddings import Embeddings
from nomic import embed
class NomicEmbeddings(Embeddings):
"""NomicEmbeddings embedding model.
Example:
.. code-block:: python
from langchain_nomic import NomicEmbeddings
model = NomicEmbeddings()
"""
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["remote"] = ...,
):
...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["local", "dynamic"],
device: Optional[str] = ...,
):
...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: str,
device: Optional[str] = ...,
):
...
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = None,
dimensionality: Optional[int] = None,
inference_mode: str = "remote",
device: Optional[str] = None,
vision_model: Optional[str] = None,
):
"""Initialize NomicEmbeddings model.
Args:
model: model name
nomic_api_key: optionally, set the Nomic API key. Uses the NOMIC_API_KEY
environment variable by default.
dimensionality: The embedding dimension, for use with Matryoshka-capable
models. Defaults to full-size.
inference_mode: How to generate embeddings. One of `remote`, `local`
(Embed4All), or `dynamic` (automatic). Defaults to `remote`.
device: The device to use for local embeddings. Choices include
`cpu`, `gpu`, `nvidia`, `amd`, or a specific device name. See
the docstring for `GPT4All.__init__` for more info. Typically
defaults to CPU. Do not use on macOS.
"""
_api_key = nomic_api_key or os.environ.get("NOMIC_API_KEY")
if _api_key:
nomic.login(_api_key)
self.model = model
self.dimensionality = dimensionality
self.inference_mode = inference_mode
self.device = device
self.vision_model = vision_model
def embed(self, texts: list[str], *, task_type: str) -> list[list[float]]:
"""Embed texts.
Args:
texts: list of texts to embed
task_type: the task type to use when embedding. One of `search_query`,
`search_document`, `classification`, `clustering`
"""
output = embed.text(
texts=texts,
model=self.model,
task_type=task_type,
dimensionality=self.dimensionality,
inference_mode=self.inference_mode,
device=self.device,
)
return output["embeddings"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs.
Args:
texts: list of texts to embed as documents
"""
return self.embed(
texts=texts,
task_type="search_document",
)
def embed_query(self, text: str) -> list[float]:
"""Embed query text.
Args:
text: query text
"""
return self.embed(
texts=[text],
task_type="search_query",
)[0]
def embed_image(self, uris: list[str]) -> list[list[float]]:
return embed.image(
images=uris,
model=self.vision_model,
)["embeddings"]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=LaserEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=LaserEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
import os
import shutil
from typing import Sequence
import pytest
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores import VectorStoreQuery
from llama_index.vector_stores.objectbox import ObjectBoxVectorStore
EMBEDDING_DIM = 3
@pytest.fixture()
def vectorstore():
obx = ObjectBoxVectorStore(embedding_dimensions=EMBEDDING_DIM)
db_default_path = "objectbox"
assert os.path.exists(db_default_path), (
f"Directory '{db_default_path}' does not exist."
)
filepath = os.path.join(db_default_path, "data.mdb")
assert os.path.isfile(filepath), (
f"File '{db_default_path}' not found in '{db_default_path}'"
)
return obx
@pytest.fixture()
def node_embeddings() -> Sequence[BaseNode]:
return [
TextNode(
id_="e8671c2d-8ee3-4f95-9730-7832f0115560",
text="test1",
embedding=[1.2, 0.3, -0.9],
),
TextNode(
id_="d0db4ed6-da16-4769-bf19-d1c06267a5f6",
text="test2",
embedding=[0.1, 0.0, 0.0],
),
TextNode(
id_="8601b27c-376e-48dd-a252-e61e01f29069",
text="test3",
embedding=[-2.3, 1.2, -6.7],
),
]
def test_add(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
node_ids = vectorstore.add(node_embeddings)
retrieved_nodes = vectorstore.get_nodes(node_ids)
assert len(retrieved_nodes) == len(node_embeddings)
def test_query(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
vectorstore.add(node_embeddings)
search_result = vectorstore.query(
VectorStoreQuery(query_embedding=[0.15, 0.001, -0.01], similarity_top_k=1)
)
assert len(search_result.ids) == 1
assert search_result.nodes[0].id_ == "d0db4ed6-da16-4769-bf19-d1c06267a5f6"
def test_get_nodes(
vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]
):
vectorstore.add(node_embeddings)
retrieved_nodes = vectorstore.get_nodes(
node_ids=["8601b27c-376e-48dd-a252-e61e01f29069"]
)
assert len(retrieved_nodes) == 1
assert retrieved_nodes[0].id_ == "8601b27c-376e-48dd-a252-e61e01f29069"
def test_count(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
vectorstore.add(node_embeddings)
assert vectorstore.count() == len(node_embeddings)
def test_delete_nodes(
vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]
):
node_ids = vectorstore.add(node_embeddings)
node_ids_to_be_deleted = node_ids[0:2]
vectorstore.delete_nodes(node_ids_to_be_deleted)
assert vectorstore.count() == 1
def test_clear(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
node_ids = vectorstore.add(node_embeddings)
vectorstore.clear()
retrieved_nodes = vectorstore.get_nodes(node_ids)
assert len(retrieved_nodes) == 0
def remove_test_dir(test_dir: str):
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
@pytest.fixture(autouse=True)
def auto_cleanup(vectorstore: ObjectBoxVectorStore):
yield # run the test function
vectorstore.close()
os.remove("llama_index/vector_stores/objectbox/objectbox-model.json")
remove_test_dir("objectbox")
|
import os
import shutil
from typing import Sequence
import pytest
from llama_index.core.schema import TextNode, BaseNode
from llama_index.core.vector_stores import VectorStoreQuery
from llama_index.vector_stores.objectbox import ObjectBoxVectorStore
EMBEDDING_DIM = 3
@pytest.fixture()
def vectorstore():
obx = ObjectBoxVectorStore(embedding_dimensions=EMBEDDING_DIM)
db_default_path = "objectbox"
assert os.path.exists(
db_default_path
), f"Directory '{db_default_path}' does not exist."
filepath = os.path.join(db_default_path, "data.mdb")
assert os.path.isfile(
filepath
), f"File '{db_default_path}' not found in '{db_default_path}'"
return obx
@pytest.fixture()
def node_embeddings() -> Sequence[BaseNode]:
return [
TextNode(
id_="e8671c2d-8ee3-4f95-9730-7832f0115560",
text="test1",
embedding=[1.2, 0.3, -0.9],
),
TextNode(
id_="d0db4ed6-da16-4769-bf19-d1c06267a5f6",
text="test2",
embedding=[0.1, 0.0, 0.0],
),
TextNode(
id_="8601b27c-376e-48dd-a252-e61e01f29069",
text="test3",
embedding=[-2.3, 1.2, -6.7],
),
]
def test_add(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
node_ids = vectorstore.add(node_embeddings)
retrieved_nodes = vectorstore.get_nodes(node_ids)
assert len(retrieved_nodes) == len(node_embeddings)
def test_query(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
vectorstore.add(node_embeddings)
search_result = vectorstore.query(
VectorStoreQuery(query_embedding=[0.15, 0.001, -0.01], similarity_top_k=1)
)
assert len(search_result.ids) == 1
assert search_result.nodes[0].id_ == "d0db4ed6-da16-4769-bf19-d1c06267a5f6"
def test_get_nodes(
vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]
):
vectorstore.add(node_embeddings)
retrieved_nodes = vectorstore.get_nodes(
node_ids=["8601b27c-376e-48dd-a252-e61e01f29069"]
)
assert len(retrieved_nodes) == 1
assert retrieved_nodes[0].id_ == "8601b27c-376e-48dd-a252-e61e01f29069"
def test_count(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
vectorstore.add(node_embeddings)
assert vectorstore.count() == len(node_embeddings)
def test_delete_nodes(
vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]
):
node_ids = vectorstore.add(node_embeddings)
node_ids_to_be_deleted = node_ids[0:2]
vectorstore.delete_nodes(node_ids_to_be_deleted)
assert vectorstore.count() == 1
def test_clear(vectorstore: ObjectBoxVectorStore, node_embeddings: Sequence[BaseNode]):
node_ids = vectorstore.add(node_embeddings)
vectorstore.clear()
retrieved_nodes = vectorstore.get_nodes(node_ids)
assert len(retrieved_nodes) == 0
def remove_test_dir(test_dir: str):
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
@pytest.fixture(autouse=True)
def auto_cleanup(vectorstore: ObjectBoxVectorStore):
yield # run the test function
vectorstore.close()
os.remove(
"llama-index-integrations/vector_stores/llama-index-vector-stores-objectbox/llama_index/vector_stores/objectbox/objectbox-model.json"
)
remove_test_dir("objectbox")
|
import logging
from argparse import ArgumentParser
import sentencepiece as spm
import torch
import torchaudio
from transforms import get_data_module
logger = logging.getLogger(__name__)
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.modality == "audiovisual":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
ckpt = torch.load(args.checkpoint_path, map_location=lambda storage, loc: storage)["state_dict"]
model.load_state_dict(ckpt)
model.eval()
return model
def run_eval(model, data_module):
total_edit_distance = 0
total_length = 0
dataloader = data_module.test_dataloader()
with torch.no_grad():
for idx, (batch, sample) in enumerate(dataloader):
actual = sample[0][-1]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.warning(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.warning(f"Final WER: {total_edit_distance / total_length}")
return total_edit_distance / total_length
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--modality",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--root-dir",
type=str,
help="Root directory to LRS3 audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to sentencepiece model.",
required=True,
)
parser.add_argument(
"--checkpoint-path",
type=str,
help="Path to a checkpoint model.",
required=True,
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
run_eval(model, data_module)
if __name__ == "__main__":
cli_main()
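# Example invocation (illustrative; the script filename and all paths are placeholders
# that depend on the local LRS3 setup):
#
#   python eval.py --modality audiovisual --mode offline \
#       --root-dir /path/to/LRS3 --sp-model-path /path/to/spm_unigram.model \
#       --checkpoint-path /path/to/checkpoint.ckpt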
|
import logging
from argparse import ArgumentParser
import sentencepiece as spm
import torch
import torchaudio
from transforms import get_data_module
logger = logging.getLogger(__name__)
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.md == "av":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
ckpt = torch.load(args.checkpoint_path, map_location=lambda storage, loc: storage)["state_dict"]
model.load_state_dict(ckpt)
model.eval()
return model
def run_eval(model, data_module):
total_edit_distance = 0
total_length = 0
dataloader = data_module.test_dataloader()
with torch.no_grad():
for idx, (batch, sample) in enumerate(dataloader):
actual = sample[0][-1]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.warning(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.warning(f"Final WER: {total_edit_distance / total_length}")
return total_edit_distance / total_length
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--md",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--root-dir",
type=str,
help="Root directory to LRS3 audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--checkpoint-path",
type=str,
help="Path to checkpoint model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
run_eval(model, data_module)
if __name__ == "__main__":
cli_main()
|
"""**Chat Models** are a variation on language models.
While Chat Models use language models under the hood, the interface they expose
is a bit different. Rather than expose a "text in, text out" API, they expose
an interface where "chat messages" are the inputs and outputs.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseChatModel --> <name> # Examples: ChatOpenAI, ChatGooglePalm
**Main helpers:**
.. code-block::
AIMessage, BaseMessage, HumanMessage
""" # noqa: E501
import warnings
from langchain_core._api import LangChainDeprecationWarning
from langchain._api.interactive_env import is_interactive_env
from langchain.chat_models.base import init_chat_model
def __getattr__(name: str) -> None:
from langchain_community import chat_models
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing chat models from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.chat_models import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
stacklevel=2,
category=LangChainDeprecationWarning,
)
return getattr(chat_models, name)
__all__ = [
"AzureChatOpenAI",
"BedrockChat",
"ChatAnthropic",
"ChatAnyscale",
"ChatBaichuan",
"ChatCohere",
"ChatDatabricks",
"ChatEverlyAI",
"ChatFireworks",
"ChatGooglePalm",
"ChatHunyuan",
"ChatJavelinAIGateway",
"ChatKonko",
"ChatLiteLLM",
"ChatMLflowAIGateway",
"ChatMlflow",
"ChatOllama",
"ChatOpenAI",
"ChatVertexAI",
"ChatYandexGPT",
"ErnieBotChat",
"FakeListChatModel",
"GigaChat",
"HumanInputChatModel",
"JinaChat",
"MiniMaxChat",
"PaiEasChatEndpoint",
"PromptLayerChatOpenAI",
"QianfanChatEndpoint",
"VolcEngineMaasChat",
"init_chat_model",
]
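# Illustrative sketch of the migration the deprecation warning above points to: import
# the concrete chat model from langchain_community (assumed installed) instead of
# langchain; ChatOllama and the model name are example choices.
#
#   from langchain_community.chat_models import ChatOllama
#   llm = ChatOllama(model="llama2")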
|
"""**Chat Models** are a variation on language models.
While Chat Models use language models under the hood, the interface they expose
is a bit different. Rather than expose a "text in, text out" API, they expose
an interface where "chat messages" are the inputs and outputs.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseChatModel --> <name> # Examples: ChatOpenAI, ChatGooglePalm
**Main helpers:**
.. code-block::
AIMessage, BaseMessage, HumanMessage
""" # noqa: E501
import warnings
from langchain_core._api import LangChainDeprecationWarning
from langchain._api.interactive_env import is_interactive_env
from langchain.chat_models.base import init_chat_model
def __getattr__(name: str) -> None:
from langchain_community import chat_models
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing chat models from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.chat_models import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(chat_models, name)
__all__ = [
"AzureChatOpenAI",
"BedrockChat",
"ChatAnthropic",
"ChatAnyscale",
"ChatBaichuan",
"ChatCohere",
"ChatDatabricks",
"ChatEverlyAI",
"ChatFireworks",
"ChatGooglePalm",
"ChatHunyuan",
"ChatJavelinAIGateway",
"ChatKonko",
"ChatLiteLLM",
"ChatMLflowAIGateway",
"ChatMlflow",
"ChatOllama",
"ChatOpenAI",
"ChatVertexAI",
"ChatYandexGPT",
"ErnieBotChat",
"FakeListChatModel",
"GigaChat",
"HumanInputChatModel",
"JinaChat",
"MiniMaxChat",
"PaiEasChatEndpoint",
"PromptLayerChatOpenAI",
"QianfanChatEndpoint",
"VolcEngineMaasChat",
"init_chat_model",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.dist import all_reduce_params, is_distributed
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
priority = 'NORMAL'
def __init__(self) -> None:
self.distributed = is_distributed()
# A flag to mark whether synchronization has been done in
# after_train_epoch
self.called_in_train = False
def before_val_epoch(self, runner) -> None:
"""All-reduce model buffers before each validation epoch.
Synchronize the buffers before each validation if they have not been
synchronized at the end of the previous training epoch. This method
will be called when using IterBasedTrainLoop.
Args:
runner (Runner): The runner of the training process.
"""
if self.distributed:
if not self.called_in_train:
all_reduce_params(runner.model.buffers(), op='mean')
self.called_in_train = False
def after_train_epoch(self, runner) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self.distributed:
all_reduce_params(runner.model.buffers(), op='mean')
self.called_in_train = True
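# Illustrative registration sketch (not part of this file): in an MMEngine runner
# config, the hook is typically enabled through `custom_hooks`; the remainder of the
# config is assumed to exist elsewhere.
#
#   custom_hooks = [dict(type='SyncBuffersHook')]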
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.dist import all_reduce_params, is_distributed
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
priority = 'NORMAL'
def __init__(self) -> None:
self.distributed = is_distributed()
def after_train_epoch(self, runner) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self.distributed:
all_reduce_params(runner.model.buffers(), op='mean')
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._type_conversion import DecodeImage, LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
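# Illustrative composition sketch (not part of the upstream __init__): only names
# re-exported above are used, and the parameters are arbitrary example values.
#
#   transform = Compose([
#       RandomHorizontalFlip(p=0.5),
#       Resize(256),
#       CenterCrop(224),
#   ])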
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._type_conversion import DecodeImage, LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
from mmdet.utils import get_device
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
device = get_device()
self.seed = sync_random_seed(seed, device)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
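# Illustrative usage sketch (not part of the upstream module): assumes a distributed
# process group has already been initialized and that `dataset` and `num_epochs` are
# defined by the caller.
#
#   sampler = DistributedSampler(dataset, shuffle=True, seed=0)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=2, sampler=sampler)
#   for epoch in range(num_epochs):
#       sampler.set_epoch(epoch)   # keeps the shuffle order different across epochs
#       for batch in loader:
#           ...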
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
self.seed = sync_random_seed(seed)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
import unittest
from transformers.testing_utils import Expectations
class ExpectationsTest(unittest.TestCase):
def test_expectations(self):
# We use the expectations below to make sure the right expectations are found for the right devices.
# Each value is just a unique ID.
expectations = Expectations(
{
(None, None): 1,
("cuda", 8): 2,
("cuda", 7): 3,
("rocm", 8): 4,
("rocm", None): 5,
("cpu", None): 6,
("xpu", 3): 7,
}
)
def check(expected_id, device_prop):
found_id = expectations.find_expectation(device_prop)
assert found_id == expected_id, f"Expected {expected_id} for {device_prop}, found {found_id}"
# npu has no matches so should find default expectation
check(1, ("npu", None, None))
check(7, ("xpu", 3, None))
check(2, ("cuda", 8, None))
check(3, ("cuda", 7, None))
check(4, ("rocm", 9, None))
check(4, ("rocm", None, None))
check(2, ("cuda", 2, None))
        # We also test that if there is no default expectation and no match is found, a ValueError is raised.
expectations = Expectations({("cuda", 8): 1})
with self.assertRaises(ValueError):
expectations.find_expectation(("xpu", None))
|
import unittest
from transformers.testing_utils import Expectations
class ExpectationsTest(unittest.TestCase):
def test_expectations(self):
expectations = Expectations(
{
(None, None): 1,
("cuda", 8): 2,
("cuda", 7): 3,
("rocm", 8): 4,
("rocm", None): 5,
("cpu", None): 6,
("xpu", 3): 7,
}
)
def check(value, key):
assert expectations.find_expectation(key) == value
# npu has no matches so should find default expectation
check(1, ("npu", None))
check(7, ("xpu", 3))
check(2, ("cuda", 8))
check(3, ("cuda", 7))
check(4, ("rocm", 9))
check(4, ("rocm", None))
check(2, ("cuda", 2))
expectations = Expectations({("cuda", 8): 1})
with self.assertRaises(ValueError):
expectations.find_expectation(("xpu", None))
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc6'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
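# Illustrative parses, consistent with the docstring above:
#   parse_version_info('3.0.0rc6') == (3, 0, 0, 'rc6')
#   parse_version_info('1.3.0') == (1, 3, 0)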
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc5'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
import os
import warnings
from modulefinder import Module
import torch
# Don't re-order these, we need to load the _C extension (done when importing
# .extensions) before entering _meta_registrations.
from .extension import _HAS_OPS # usort:skip
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils # usort:skip
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
def disable_beta_transforms_warning():
# Noop, only exists to avoid breaking existing code.
# See https://github.com/pytorch/vision/issues/7896
pass
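# Illustrative backend-selection sketch (not part of the upstream __init__); the values
# shown are the defaults documented above, and "video_reader" needs a source build.
#
#   import torchvision
#   torchvision.set_image_backend("PIL")
#   torchvision.set_video_backend("pyav")
#   print(torchvision.get_image_backend(), torchvision.get_video_backend())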
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
def disable_beta_transforms_warning():
# Noop, only exists to avoid breaking existing code.
# See https://github.com/pytorch/vision/issues/7896
pass
|
from typing import Any, Optional
from unittest import mock
import pytest
from langchain_community.tools.databricks._execution import (
DEFAULT_EXECUTE_FUNCTION_ARGS,
EXECUTE_FUNCTION_ARG_NAME,
execute_function,
)
@pytest.mark.requires("databricks.sdk")
@pytest.mark.parametrize(
("parameters", "execute_params"),
[
({"a": 1, "b": 2}, DEFAULT_EXECUTE_FUNCTION_ARGS),
(
{"a": 1, EXECUTE_FUNCTION_ARG_NAME: {"wait_timeout": "10s"}},
{**DEFAULT_EXECUTE_FUNCTION_ARGS, "wait_timeout": "10s"},
),
(
{EXECUTE_FUNCTION_ARG_NAME: {"row_limit": "1000"}},
{**DEFAULT_EXECUTE_FUNCTION_ARGS, "row_limit": "1000"},
),
],
)
def test_execute_function(parameters: dict, execute_params: dict) -> None:
workspace_client = mock.Mock()
def mock_execute_statement(
statement: str,
warehouse_id: str,
*,
byte_limit: Optional[int] = None,
catalog: Optional[str] = None,
disposition: Optional[Any] = None,
format: Optional[Any] = None,
on_wait_timeout: Optional[Any] = None,
parameters: Optional[list[Any]] = None,
row_limit: Optional[int] = None,
schema: Optional[str] = None,
wait_timeout: Optional[str] = None,
) -> mock.Mock:
for key, value in execute_params.items():
assert locals()[key] == value
return mock.Mock()
workspace_client.statement_execution.execute_statement = mock_execute_statement
function = mock.Mock()
function.data_type = "TABLE_TYPE"
function.input_params.parameters = []
execute_function(
workspace_client, warehouse_id="id", function=function, parameters=parameters
)
@pytest.mark.requires("databricks.sdk")
def test_execute_function_error() -> None:
workspace_client = mock.Mock()
def mock_execute_statement(
statement: str,
warehouse_id: str,
*,
byte_limit: Optional[int] = None,
catalog: Optional[str] = None,
disposition: Optional[Any] = None,
format: Optional[Any] = None,
on_wait_timeout: Optional[Any] = None,
parameters: Optional[list[Any]] = None,
row_limit: Optional[int] = None,
schema: Optional[str] = None,
wait_timeout: Optional[str] = None,
) -> mock.Mock:
return mock.Mock()
workspace_client.statement_execution.execute_statement = mock_execute_statement
function = mock.Mock()
function.data_type = "TABLE_TYPE"
function.input_params.parameters = []
parameters = {EXECUTE_FUNCTION_ARG_NAME: {"invalid_param": "123"}}
with pytest.raises(
ValueError,
match=r"Invalid parameters for executing functions: {'invalid_param'}. ",
):
execute_function(
workspace_client,
warehouse_id="id",
function=function,
parameters=parameters,
)
|
from unittest import mock
import pytest
from langchain_community.tools.databricks._execution import (
DEFAULT_EXECUTE_FUNCTION_ARGS,
EXECUTE_FUNCTION_ARG_NAME,
execute_function,
)
@pytest.mark.requires("databricks.sdk")
@pytest.mark.parametrize(
("parameters", "execute_params"),
[
({"a": 1, "b": 2}, DEFAULT_EXECUTE_FUNCTION_ARGS),
(
{"a": 1, EXECUTE_FUNCTION_ARG_NAME: {"wait_timeout": "10s"}},
{**DEFAULT_EXECUTE_FUNCTION_ARGS, "wait_timeout": "10s"},
),
(
{EXECUTE_FUNCTION_ARG_NAME: {"row_limit": "1000"}},
{**DEFAULT_EXECUTE_FUNCTION_ARGS, "row_limit": "1000"},
),
],
)
def test_execute_function(parameters: dict, execute_params: dict) -> None:
workspace_client = mock.Mock()
def mock_execute_statement( # type: ignore
statement,
warehouse_id,
*,
byte_limit=None,
catalog=None,
disposition=None,
format=None,
on_wait_timeout=None,
parameters=None,
row_limit=None,
schema=None,
wait_timeout=None,
):
for key, value in execute_params.items():
assert locals()[key] == value
return mock.Mock()
workspace_client.statement_execution.execute_statement = mock_execute_statement
function = mock.Mock()
function.data_type = "TABLE_TYPE"
function.input_params.parameters = []
execute_function(
workspace_client, warehouse_id="id", function=function, parameters=parameters
)
@pytest.mark.requires("databricks.sdk")
def test_execute_function_error() -> None:
workspace_client = mock.Mock()
def mock_execute_statement( # type: ignore
statement,
warehouse_id,
*,
byte_limit=None,
catalog=None,
disposition=None,
format=None,
on_wait_timeout=None,
parameters=None,
row_limit=None,
schema=None,
wait_timeout=None,
):
return mock.Mock()
workspace_client.statement_execution.execute_statement = mock_execute_statement
function = mock.Mock()
function.data_type = "TABLE_TYPE"
function.input_params.parameters = []
parameters = {EXECUTE_FUNCTION_ARG_NAME: {"invalid_param": "123"}}
with pytest.raises(
ValueError,
match=r"Invalid parameters for executing functions: {'invalid_param'}. ",
):
execute_function(
workspace_client,
warehouse_id="id",
function=function,
parameters=parameters,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from mmengine.dataset import DefaultSampler
from torch.utils.data import Dataset
from mmdet.datasets.samplers import AspectRatioBatchSampler
class DummyDataset(Dataset):
def __init__(self, length):
self.length = length
self.shapes = np.random.random((length, 2))
def __len__(self):
return self.length
def __getitem__(self, idx):
return self.shapes[idx]
def get_data_info(self, idx):
return dict(width=self.shapes[idx][0], height=self.shapes[idx][1])
class TestAspectRatioBatchSampler(TestCase):
@patch('mmengine.dist.get_dist_info', return_value=(0, 1))
def setUp(self, mock):
self.length = 100
self.dataset = DummyDataset(self.length)
self.sampler = DefaultSampler(self.dataset, shuffle=False)
def test_invalid_inputs(self):
with self.assertRaisesRegex(
ValueError, 'batch_size should be a positive integer value'):
AspectRatioBatchSampler(self.sampler, batch_size=-1)
with self.assertRaisesRegex(
TypeError, 'sampler should be an instance of ``Sampler``'):
AspectRatioBatchSampler(None, batch_size=1)
def test_divisible_batch(self):
batch_size = 5
batch_sampler = AspectRatioBatchSampler(
self.sampler, batch_size=batch_size, drop_last=True)
self.assertEqual(len(batch_sampler), self.length // batch_size)
for batch_idxs in batch_sampler:
self.assertEqual(len(batch_idxs), batch_size)
batch = [self.dataset[idx] for idx in batch_idxs]
flag = batch[0][0] < batch[0][1]
for i in range(1, batch_size):
self.assertEqual(batch[i][0] < batch[i][1], flag)
def test_indivisible_batch(self):
batch_size = 7
batch_sampler = AspectRatioBatchSampler(
self.sampler, batch_size=batch_size, drop_last=False)
all_batch_idxs = list(batch_sampler)
self.assertEqual(
len(batch_sampler), (self.length + batch_size - 1) // batch_size)
self.assertEqual(
len(all_batch_idxs), (self.length + batch_size - 1) // batch_size)
batch_sampler = AspectRatioBatchSampler(
self.sampler, batch_size=batch_size, drop_last=True)
all_batch_idxs = list(batch_sampler)
self.assertEqual(len(batch_sampler), self.length // batch_size)
self.assertEqual(len(all_batch_idxs), self.length // batch_size)
# the last batch may not have the same aspect ratio
for batch_idxs in all_batch_idxs[:-1]:
self.assertEqual(len(batch_idxs), batch_size)
batch = [self.dataset[idx] for idx in batch_idxs]
flag = batch[0][0] < batch[0][1]
for i in range(1, batch_size):
self.assertEqual(batch[i][0] < batch[i][1], flag)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from mmengine.data import DefaultSampler
from torch.utils.data import Dataset
from mmdet.datasets.samplers import AspectRatioBatchSampler
class DummyDataset(Dataset):
def __init__(self, length):
self.length = length
self.shapes = np.random.random((length, 2))
def __len__(self):
return self.length
def __getitem__(self, idx):
return self.shapes[idx]
def get_data_info(self, idx):
return dict(width=self.shapes[idx][0], height=self.shapes[idx][1])
class TestAspectRatioBatchSampler(TestCase):
@patch('mmengine.data.sampler.get_dist_info', return_value=(0, 1))
def setUp(self, mock):
self.length = 100
self.dataset = DummyDataset(self.length)
self.sampler = DefaultSampler(self.dataset, shuffle=False)
def test_invalid_inputs(self):
with self.assertRaisesRegex(
ValueError, 'batch_size should be a positive integer value'):
AspectRatioBatchSampler(self.sampler, batch_size=-1)
with self.assertRaisesRegex(
TypeError, 'sampler should be an instance of ``Sampler``'):
AspectRatioBatchSampler(None, batch_size=1)
def test_divisible_batch(self):
batch_size = 5
batch_sampler = AspectRatioBatchSampler(
self.sampler, batch_size=batch_size, drop_last=True)
self.assertEqual(len(batch_sampler), self.length // batch_size)
for batch_idxs in batch_sampler:
self.assertEqual(len(batch_idxs), batch_size)
batch = [self.dataset[idx] for idx in batch_idxs]
flag = batch[0][0] < batch[0][1]
for i in range(1, batch_size):
self.assertEqual(batch[i][0] < batch[i][1], flag)
def test_indivisible_batch(self):
batch_size = 7
batch_sampler = AspectRatioBatchSampler(
self.sampler, batch_size=batch_size, drop_last=False)
all_batch_idxs = list(batch_sampler)
self.assertEqual(
len(batch_sampler), (self.length + batch_size - 1) // batch_size)
self.assertEqual(
len(all_batch_idxs), (self.length + batch_size - 1) // batch_size)
batch_sampler = AspectRatioBatchSampler(
self.sampler, batch_size=batch_size, drop_last=True)
all_batch_idxs = list(batch_sampler)
self.assertEqual(len(batch_sampler), self.length // batch_size)
self.assertEqual(len(all_batch_idxs), self.length // batch_size)
# the last batch may not have the same aspect ratio
for batch_idxs in all_batch_idxs[:-1]:
self.assertEqual(len(batch_idxs), batch_size)
batch = [self.dataset[idx] for idx in batch_idxs]
flag = batch[0][0] < batch[0][1]
for i in range(1, batch_size):
self.assertEqual(batch[i][0] < batch[i][1], flag)
|
def __getattr__(name: str = "") -> None:
"""Raise an error on import since is deprecated."""
msg = (
"This module has been moved to langchain-experimental. "
"For more details: https://github.com/langchain-ai/langchain/discussions/11352."
"To access this code, install it with `pip install langchain-experimental`."
"`from langchain_experimental.llm_symbolic_math.base "
"import LLMSymbolicMathChain`"
)
raise AttributeError(msg)
|
def __getattr__(name: str = "") -> None:
"""Raise an error on import since is deprecated."""
raise AttributeError(
"This module has been moved to langchain-experimental. "
"For more details: https://github.com/langchain-ai/langchain/discussions/11352."
"To access this code, install it with `pip install langchain-experimental`."
"`from langchain_experimental.llm_symbolic_math.base "
"import LLMSymbolicMathChain`"
)
|
import os
import pytest
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.core.prompts.base import ChatPromptTemplate
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
from pydantic import BaseModel
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini() -> None:
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}, {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt() -> None:
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini(model="models/gemini-1.5-flash").chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream() -> None:
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini(model="models/gemini-1.5-flash").stream_chat(messages=[msg]))
assert response
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_with_tools() -> None:
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
msg = ChatMessage("What is the result of adding 2 and 3?")
model = Gemini(model="models/gemini-1.5-flash")
response = model.chat_with_tools(
user_msg=msg,
tools=[add_tool],
tool_config=ToolConfig(
function_calling_config=FunctionCallingConfig(
mode=FunctionCallingConfig.Mode.ANY
)
),
)
tool_calls = model.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "add"
assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}
assert len(response.additional_kwargs["tool_calls"]) >= 1
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_structured_llm() -> None:
class Test(BaseModel):
test: str
gemini_flash = Gemini(
model="models/gemini-2.0-flash-001",
api_key=os.environ["GOOGLE_API_KEY"],
additional_kwargs={"seed": 4242},
)
chat_prompt = ChatPromptTemplate(message_templates=[ChatMessage(content="test")])
direct_prediction_response = gemini_flash.structured_predict(
output_cls=Test, prompt=chat_prompt
)
assert direct_prediction_response.test is not None
structured_llm_response = gemini_flash.as_structured_llm(Test).complete("test")
assert structured_llm_response.raw.test is not None
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_is_function_calling_model() -> None:
assert Gemini(
model="models/gemini-2.0-flash-001"
).metadata.is_function_calling_model
# this model is the only one that does not support function calling
assert not Gemini(
model="models/gemini-2.0-flash-thinking-exp-01-21"
).metadata.is_function_calling_model
# in case of un-released models it should be possible to override the
# capabilities of the current model
manual_override = Gemini(model="models/gemini-2.0-flash-001")
assert manual_override.metadata.is_function_calling_model
manual_override._is_function_call_model = False
assert not manual_override._is_function_call_model
assert not manual_override.metadata.is_function_calling_model
|
import os
from llama_index.core.tools.function_tool import FunctionTool
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini() -> None:
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}, {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt() -> None:
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini(model="models/gemini-1.5-flash").chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream() -> None:
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini(model="models/gemini-1.5-flash").stream_chat(messages=[msg]))
assert response
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_with_tools() -> None:
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
msg = ChatMessage("What is the result of adding 2 and 3?")
model = Gemini(model="models/gemini-1.5-flash")
response = model.chat_with_tools(
user_msg=msg,
tools=[add_tool],
tool_config=ToolConfig(
function_calling_config=FunctionCallingConfig(
mode=FunctionCallingConfig.Mode.ANY
)
),
)
tool_calls = model.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "add"
assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}
assert len(response.additional_kwargs["tool_calls"]) >= 1
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import librosa
import matplotlib.pyplot as plt
from torchaudio.utils import download_asset
SAMPLE_WAV_SPEECH_PATH = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Spectrogram (db)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
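# ``TimeStretch`` operates on a complex-valued spectrogram, so ``power=None`` is
# passed to ``get_spectrogram`` below to obtain one.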
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
plot_spectrogram(torch.abs(spec[0]), title="Original", aspect="equal", xmax=304)
rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
######################################################################
# TimeMasking
# -----------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along time axis")
######################################################################
# FrequencyMasking
# ----------------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along frequency axis")
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import librosa
import matplotlib.pyplot as plt
import requests
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Spectrogram (db)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
plot_spectrogram(torch.abs(spec[0]), title="Original", aspect="equal", xmax=304)
rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
######################################################################
# TimeMasking
# -----------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along time axis")
######################################################################
# FrequencyMasking
# ----------------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along frequency axis")
|
from enum import Enum
from typing import Callable, Union
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
manhattan_sim,
euclidean_sim,
dot_score,
pairwise_cos_sim,
pairwise_manhattan_sim,
pairwise_euclidean_sim,
pairwise_dot_score,
)
class SimilarityFunction(Enum):
"""
Enum class for supported similarity functions. The following functions are supported:
- ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
- ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``, ``dot_product``): Dot product similarity
- ``SimilarityFunction.EUCLIDEAN`` (``"euclidean"``): Euclidean distance
- ``SimilarityFunction.MANHATTAN`` (``"manhattan"``): Manhattan distance
"""
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function name or enum value to the corresponding similarity function.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The corresponding similarity function.
Raises:
ValueError: If the provided function is not supported.
Example:
>>> similarity_fn = SimilarityFunction.to_similarity_fn("cosine")
>>> similarity_scores = similarity_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([[0.3952, 0.0554],
[0.0992, 0.1570]])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function into a pairwise similarity function.
The pairwise similarity function returns the diagonal vector from the similarity matrix, i.e. it only
computes the similarity(a[i], b[i]) for each i in the range of the input tensors, rather than
computing the similarity between all pairs of a and b.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The pairwise similarity function.
Raises:
ValueError: If the provided similarity function is not supported.
Example:
>>> pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
>>> similarity_scores = pairwise_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([0.3952, 0.1570])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values():
"""
Returns a list of possible values for the SimilarityFunction enum.
Returns:
list: A list of possible values for the SimilarityFunction enum.
Example:
>>> possible_values = SimilarityFunction.possible_values()
>>> possible_values
['cosine', 'dot', 'euclidean', 'manhattan']
"""
return [m.value for m in SimilarityFunction]
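# A minimal, self-contained sketch of the two lookup helpers above
# (the tensor shapes and values are arbitrary illustrations):
#
#     import torch
#
#     fn = SimilarityFunction.to_similarity_fn("cosine")
#     pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn(SimilarityFunction.EUCLIDEAN)
#
#     a, b = torch.randn(2, 4), torch.randn(2, 4)
#     fn(a, b).shape           # (2, 2): similarity between every pair
#     pairwise_fn(a, b).shape  # (2,): similarity(a[i], b[i]) only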
|
from enum import Enum
from typing import Callable, Union
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
manhattan_sim,
euclidean_sim,
dot_score,
pairwise_cos_sim,
pairwise_manhattan_sim,
pairwise_euclidean_sim,
pairwise_dot_score,
)
class SimilarityFunction(Enum):
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values():
return [m.value for m in SimilarityFunction]
|
from typing import Optional, Dict, List, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_integer_field():
class Meow(BaseDocument):
age: int
wealth: float
registered: bool
d = Meow(age=30, wealth=100.5, registered=True)
rebuilt_doc = Meow.from_protobuf(d.to_protobuf())
assert rebuilt_doc.age == 30
assert rebuilt_doc.wealth == 100.5
assert rebuilt_doc.registered
@pytest.mark.proto
def test_list_set_dict_tuple_field():
class MyDoc(BaseDocument):
list_: List
dict_: Dict
tuple_: Tuple
set_: Set
d = MyDoc(
list_=[0, 1, 2], dict_={'a': 0, 'b': 1}, tuple_=tuple([0, 1]), set_={0, 1}
)
rebuilt_doc = MyDoc.from_protobuf(d.to_protobuf())
assert rebuilt_doc.list_ == [0, 1, 2]
assert rebuilt_doc.dict_ == {'a': 0, 'b': 1}
assert rebuilt_doc.tuple_ == (0, 1)
assert rebuilt_doc.set_ == {0, 1}
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
np.uint,
np.uint8,
np.uint64,
np.int,
np.int8,
np.int64,
np.float,
np.float16,
np.float128,
np.double,
],
)
def test_ndarray_dtype(dtype):
class MyDoc(BaseDocument):
tensor: NdArray
doc = MyDoc(tensor=np.ndarray([1, 2, 3], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
torch.uint8,
torch.int,
torch.int8,
torch.int64,
torch.float,
torch.float64,
torch.double,
],
)
def test_torch_dtype(dtype):
class MyDoc(BaseDocument):
tensor: TorchTensor
doc = MyDoc(tensor=torch.zeros([5, 5], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
|
from typing import Optional, Dict, List, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_integer_field():
class Meow(BaseDocument):
age: int
wealth: float
registered: bool
d = Meow(age=30, wealth=100.5, registered=True)
rebuilt_doc = Meow.from_protobuf(d.to_protobuf())
assert rebuilt_doc.age == 30
assert rebuilt_doc.wealth == 100.5
assert rebuilt_doc.registered
@pytest.mark.proto
def test_list_set_dict_tuple_field():
class MyDoc(BaseDocument):
list_: List
dict_: Dict
tuple_: Tuple
set_: Set
d = MyDoc(
list_=[0, 1, 2], dict_={'a': 0, 'b': 1}, tuple_=tuple([0, 1]), set_={0, 1}
)
rebuilt_doc = MyDoc.from_protobuf(d.to_protobuf())
assert rebuilt_doc.list_ == [0, 1, 2]
assert rebuilt_doc.dict_ == {'a': 0, 'b': 1}
assert rebuilt_doc.tuple_ == (0, 1)
assert rebuilt_doc.set_ == {0, 1}
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
class MetricMock:
def __init__(self, metric_id):
self.id = metric_id
class HfhMock:
_metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def list_metrics(self):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
"func, args, kwargs",
[
(load_metric, ("metrics/mse",), {"trust_remote_code": True}),
(list_metrics, (), {}),
(inspect_metric, ("metrics/mse", "tmp_path"), {"trust_remote_code": True}),
],
)
def test_metric_deprecation_warning(func, args, kwargs, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
if "tmp_path" in args:
args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
func(*args, **kwargs)
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
class MetricMock:
def __init__(self, metric_id):
self.id = metric_id
class HfhMock:
_metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def list_metrics(self):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
"func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
if "tmp_path" in args:
args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
func(*args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
import torch
from torch import Tensor
from mmengine.evaluator import DumpResults
from mmengine.fileio import load
class TestDumpResults(TestCase):
def test_init(self):
with self.assertRaisesRegex(ValueError,
'The output file must be a pkl file.'):
DumpResults(out_file_path='./results.json')
def test_process(self):
metric = DumpResults(out_file_path='./results.pkl')
data_samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
metric.process(None, data_samples)
self.assertEqual(len(metric.results), 1)
self.assertEqual(metric.results[0]['data'][0].device,
torch.device('cpu'))
def test_compute_metrics(self):
temp_dir = tempfile.TemporaryDirectory()
path = osp.join(temp_dir.name, 'results.pkl')
metric = DumpResults(out_file_path=path)
data_samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
metric.process(None, data_samples)
metric.compute_metrics(metric.results)
self.assertTrue(osp.isfile(path))
results = load(path)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['data'][0].device, torch.device('cpu'))
temp_dir.cleanup()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
import torch
from torch import Tensor
from mmengine.evaluator import DumpResults
from mmengine.fileio import load
class TestDumpResults(TestCase):
def test_init(self):
with self.assertRaisesRegex(ValueError,
'The output file must be a pkl file.'):
DumpResults(out_file_path='./results.json')
def test_process(self):
metric = DumpResults(out_file_path='./results.pkl')
predictions = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
metric.process(None, predictions)
self.assertEqual(len(metric.results), 1)
self.assertEqual(metric.results[0]['data'][0].device,
torch.device('cpu'))
def test_compute_metrics(self):
temp_dir = tempfile.TemporaryDirectory()
path = osp.join(temp_dir.name, 'results.pkl')
metric = DumpResults(out_file_path=path)
predictions = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
metric.process(None, predictions)
metric.compute_metrics(metric.results)
self.assertTrue(osp.isfile(path))
results = load(path)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['data'][0].device, torch.device('cpu'))
temp_dir.cleanup()
|
from typing import Any, Optional
from backend.util.request import requests
class GetRequest:
@classmethod
def get_request(
cls, url: str, headers: Optional[dict] = None, json: bool = False
) -> Any:
if headers is None:
headers = {}
response = requests.get(url, headers=headers)
return response.json() if json else response.text
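# A minimal usage sketch (the URLs below are purely illustrative):
#
#     body = GetRequest.get_request("https://example.com/api/items", json=True)
#     text = GetRequest.get_request("https://example.com/robots.txt")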
|
from typing import Any, Optional
import requests
class GetRequest:
@classmethod
def get_request(
cls, url: str, headers: Optional[dict] = None, json: bool = False
) -> Any:
if headers is None:
headers = {}
response = requests.get(url, headers=headers)
response.raise_for_status()
return response.json() if json else response.text
|
import socket
from dataclasses import asdict
import numpy as np
import pytest
from loky import get_reusable_executor
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
from xgboost import testing as tm
from xgboost.collective import Config
def run_rabit_worker(rabit_env: dict, world_size: int) -> int:
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast("test1234", 0)
assert str(ret) == "test1234"
reduced = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(reduced, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.no_loky())
def test_rabit_communicator() -> None:
world_size = 2
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
tracker.start()
workers = []
with get_reusable_executor(max_workers=world_size) as pool:
for _ in range(world_size):
worker = pool.submit(
run_rabit_worker, rabit_env=tracker.worker_args(), world_size=world_size
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
def run_federated_worker(port: int, world_size: int, rank: int) -> int:
with xgb.collective.CommunicatorContext(
dmlc_communicator="federated",
federated_server_address=f"localhost:{port}",
federated_world_size=world_size,
federated_rank=rank,
):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f"rank:{rank}"
bret = xgb.collective.broadcast("test1234", 0)
assert str(bret) == "test1234"
aret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(aret, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.skip_win())
@pytest.mark.skipif(**tm.no_loky())
def test_federated_communicator() -> None:
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
with get_reusable_executor(max_workers=world_size + 1) as pool:
kwargs = {"port": port, "n_workers": world_size, "blocking": False}
tracker = pool.submit(federated.run_federated_server, **kwargs)
if not tracker.running():
raise RuntimeError("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = pool.submit(
run_federated_worker, port=port, world_size=world_size, rank=rank
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
def test_config_serialization() -> None:
cfg = Config(retry=1, timeout=2, tracker_host_ip="127.0.0.1", tracker_port=None)
cfg1 = Config(**asdict(cfg))
assert cfg == cfg1
|
import socket
import sys
from threading import Thread
import numpy as np
import pytest
from loky import get_reusable_executor
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
from xgboost import testing as tm
def run_rabit_worker(rabit_env: dict, world_size: int) -> int:
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast("test1234", 0)
assert str(ret) == "test1234"
reduced = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(reduced, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.no_loky())
def test_rabit_communicator() -> None:
world_size = 2
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
tracker.start()
workers = []
with get_reusable_executor(max_workers=world_size) as pool:
for _ in range(world_size):
worker = pool.submit(
run_rabit_worker, rabit_env=tracker.worker_args(), world_size=world_size
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
def run_federated_worker(port: int, world_size: int, rank: int) -> int:
with xgb.collective.CommunicatorContext(
dmlc_communicator="federated",
federated_server_address=f"localhost:{port}",
federated_world_size=world_size,
federated_rank=rank,
):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f"rank:{rank}"
bret = xgb.collective.broadcast("test1234", 0)
assert str(bret) == "test1234"
aret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(aret, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.skip_win())
@pytest.mark.skipif(**tm.no_loky())
def test_federated_communicator():
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
with get_reusable_executor(max_workers=world_size+1) as pool:
kwargs={"port": port, "n_workers": world_size, "blocking": False}
tracker = pool.submit(federated.run_federated_server, **kwargs)
if not tracker.running():
raise RuntimeError("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = pool.submit(
run_federated_worker, port=port, world_size=world_size, rank=rank
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
|
from .conv_emformer import ConvEmformer
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
]
|
from .conv_emformer import ConvEmformer
from .conv_tasnet import conv_tasnet_base
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conv_tasnet_base",
"ConvEmformer",
]
|
"""Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
# create the top-level parser
urls = {
'Code': ('💻', 'https://oss.jina.ai'),
'Docs': ('📖', 'https://docs.jina.ai'),
'Help': ('💬', 'https://discord.jina.ai'),
'Hiring!': ('🙌', 'https://jobs.jina.ai'),
}
url_str = '\n'.join(
f'- {v[0]:<10} {k:10.10}\t{colored(v[1], "cyan", attrs=["underline"])}'
for k, v in urls.items()
)
parser = argparse.ArgumentParser(
epilog=f'''
Jina v{colored(__version__, "green")}: Build multimodal AI services via cloud native technologies.
{url_str}
''',
formatter_class=_chf,
)
parser.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help='Show Jina version',
)
parser.add_argument(
'-vf',
'--version-full',
action='version',
version=format_full_version_info(*get_full_version()),
help='Show Jina and all dependencies\' versions',
)
return parser
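# A minimal usage sketch (note that `--version` prints and exits via argparse):
#
#     parser = set_base_parser()
#     args = parser.parse_args([])  # no flags: returns an empty Namespace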
|
"""Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
# create the top-level parser
urls = {
'Code': ('💻', 'https://oss.jina.ai'),
'Docs': ('📖', 'https://docs.jina.ai'),
'Help': ('💬', 'https://slack.jina.ai'),
'Hiring!': ('🙌', 'https://jobs.jina.ai'),
}
url_str = '\n'.join(
f'- {v[0]:<10} {k:10.10}\t{colored(v[1], "cyan", attrs=["underline"])}'
for k, v in urls.items()
)
parser = argparse.ArgumentParser(
epilog=f'''
Jina v{colored(__version__, "green")}: Build multimodal AI services via cloud native technologies.
{url_str}
''',
formatter_class=_chf,
)
parser.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help='Show Jina version',
)
parser.add_argument(
'-vf',
'--version-full',
action='version',
version=format_full_version_info(*get_full_version()),
help='Show Jina and all dependencies\' versions',
)
return parser
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core._api import beta
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
@beta()
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
callback = UsageMetadataCallbackHandler()
result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
callback.usage_metadata
.. code-block:: none
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
.. versionadded:: 0.3.49
"""
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: dict[str, UsageMetadata] = {}
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
usage_metadata = None
model_name = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
model_name = message.response_metadata.get("model_name")
except AttributeError:
pass
# update shared state behind lock
if usage_metadata and model_name:
with self._lock:
if model_name not in self.usage_metadata:
self.usage_metadata[model_name] = usage_metadata
else:
self.usage_metadata[model_name] = add_usage(
self.usage_metadata[model_name], usage_metadata
)
@contextmanager
@beta()
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
with get_usage_metadata_callback() as cb:
llm_1.invoke("Hello")
llm_2.invoke("Hello")
print(cb.usage_metadata)
.. code-block:: none
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
.. versionadded:: 0.3.49
"""
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core._api import beta
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
@beta()
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm = init_chat_model(model="openai:gpt-4o-mini")
callback = UsageMetadataCallbackHandler()
results = llm.batch(["Hello", "Goodbye"], config={"callbacks": [callback]})
print(callback.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: dict[str, UsageMetadata] = {}
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
usage_metadata = None
model_name = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
model_name = message.response_metadata.get("model_name")
except AttributeError:
pass
# update shared state behind lock
if usage_metadata and model_name:
with self._lock:
if model_name not in self.usage_metadata:
self.usage_metadata[model_name] = usage_metadata
else:
self.usage_metadata[model_name] = add_usage(
self.usage_metadata[model_name], usage_metadata
)
@contextmanager
@beta()
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm = init_chat_model(model="openai:gpt-4o-mini")
with get_usage_metadata_callback() as cb:
llm.invoke("Hello")
llm.invoke("Goodbye")
print(cb.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
|
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from .. import NumpySearcher
TOP_K = 5
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def query_docs():
chunks = DocumentArray([Document(embedding=np.random.random(7))])
root_doc = Document(embedding=np.random.random(7))
root_doc.chunks.extend(chunks)
docs = DocumentArray()
docs.append(root_doc)
return docs
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
def test_query_vector(tmpdir, query_docs, default_traversal_paths):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, default_traversal_paths=default_traversal_paths, runtime_args=runtime)
indexer.search(query_docs, {'top_k': TOP_K})
assert len(query_docs) == 1
doc_traversal = query_docs.traverse_flat(default_traversal_paths)
assert len(doc_traversal[0].matches) == TOP_K
assert len(doc_traversal[0].matches[0].embedding) == 7
@pytest.mark.parametrize(['metric', 'is_distance'],
[('cosine', True), ('euclidean', True),
('cosine', False), ('euclidean', False)])
def test_metric(tmpdir, query_docs, metric, is_distance):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, default_top_k=TOP_K, runtime_args=runtime, metric=metric,
is_distance=is_distance)
indexer.search(query_docs, {})
assert len(query_docs[0].matches) == TOP_K
for i in range(len(query_docs[0].matches) - 1):
if not is_distance:
assert query_docs[0].matches[i].scores[metric].value >= query_docs[0].matches[i + 1].scores[metric].value
else:
assert query_docs[0].matches[i].scores[metric].value <= query_docs[0].matches[i + 1].scores[metric].value
def test_empty_shard(tmpdir, query_docs):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump_empty', runtime_args=runtime)
indexer.search(query_docs, {'top_k': TOP_K})
assert len(query_docs) == 1
assert len(query_docs[0].matches) == 0
def test_empty_documents(tmpdir):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump1', runtime_args=runtime)
docs = DocumentArray([Document()])
indexer.search(docs, {'top_k': TOP_K})
assert len(docs) == 1
assert len(docs[0].matches) == 0
docs2 = DocumentArray()
indexer.search(docs2, {'top_k': TOP_K})
assert len(docs2) == 0
|
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from .. import NumpySearcher
TOP_K = 5
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_query_vector(tmpdir):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, runtime_args=runtime)
docs = DocumentArray([Document(embedding=np.random.random(7))])
TOP_K = 5
indexer.search(docs, {'top_k': TOP_K})
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert len(docs[0].matches[0].embedding) == 7
@pytest.mark.parametrize(['metric', 'is_distance'],
[('cosine', True), ('euclidean', True),
('cosine', False), ('euclidean', False)])
def test_metric(tmpdir, metric, is_distance):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, default_top_k=TOP_K, runtime_args=runtime, metric=metric,
is_distance=is_distance)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert docs[0].matches[i].scores[metric].value >= docs[0].matches[i + 1].scores[metric].value
else:
assert docs[0].matches[i].scores[metric].value <= docs[0].matches[i + 1].scores[metric].value
def test_empty_shard(tmpdir):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump_empty', runtime_args=runtime)
docs = DocumentArray([Document(embedding=np.random.random(7))])
TOP_K = 5
indexer.search(docs, {'top_k': TOP_K})
assert len(docs) == 1
assert len(docs[0].matches) == 0
def test_empty_documents(tmpdir):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump1', runtime_args=runtime)
docs = DocumentArray([Document(id=0)])
TOP_K = 5
indexer.search(docs, {'top_k': TOP_K})
assert len(docs) == 1
assert len(docs[0].matches) == 0
docs2 = DocumentArray()
indexer.search(docs2, {'top_k': TOP_K})
assert len(docs2) == 0
|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk",
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = Requests().post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk",
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = requests.post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the
    image into grids and encodes bboxes (x1, y1, x2, y2) into (cx, cy, dw, dh).
    cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
    the bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., anchors.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
        Args:
            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes,
                e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with shape (N, 4).
            stride (torch.Tensor | int): Strides of bboxes.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
if self.use_box_type:
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the
    image into grids and encodes bboxes (x1, y1, x2, y2) into (cx, cy, dw, dh).
    cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
    the bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., anchors.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
        Args:
            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes,
                e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with shape (N, 4).
            stride (torch.Tensor | int): Strides of bboxes.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
if self.use_box_type:
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
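A self-contained numeric sketch of the transform implemented by ``encode``/``decode`` above, written in plain ``torch`` so it runs without the mmdet registry; the anchor, ground-truth box, and stride values are made up.
import torch

eps = 1e-6
stride = 8
anchor = torch.tensor([8.0, 8.0, 24.0, 24.0])  # x1, y1, x2, y2 (hypothetical)
gt = torch.tensor([9.0, 7.0, 27.0, 25.0])

# encode: center offsets in grid units shifted into [0, 1], plus log size ratios
ax, ay = (anchor[0] + anchor[2]) / 2, (anchor[1] + anchor[3]) / 2
gx, gy = (gt[0] + gt[2]) / 2, (gt[1] + gt[3]) / 2
aw, ah = anchor[2] - anchor[0], anchor[3] - anchor[1]
gw, gh = gt[2] - gt[0], gt[3] - gt[1]
target = torch.stack([
    ((gx - ax) / stride + 0.5).clamp(eps, 1 - eps),
    ((gy - ay) / stride + 0.5).clamp(eps, 1 - eps),
    torch.log((gw / aw).clamp(min=eps)),
    torch.log((gh / ah).clamp(min=eps)),
])

# decode: invert the transform around the anchor center
cx = ax + (target[0] - 0.5) * stride
cy = ay + (target[1] - 0.5) * stride
half_w = aw / 2 * target[2].exp()
half_h = ah / 2 * target[3].exp()
decoded = torch.stack([cx - half_w, cy - half_h, cx + half_w, cy + half_h])
print(decoded)  # tensor([ 9.,  7., 27., 25.]) -- recovers the box up to float error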
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.distributed as torch_dist
import torch.multiprocessing as mp
import mmengine.dist as dist
def _test_get_backend_non_dist():
assert dist.get_backend() is None
def _test_get_world_size_non_dist():
assert dist.get_world_size() == 1
def _test_get_rank_non_dist():
assert dist.get_rank() == 0
def _test_local_size_non_dist():
assert dist.get_local_size() == 1
def _test_local_rank_non_dist():
assert dist.get_local_rank() == 0
def _test_get_dist_info_non_dist():
assert dist.get_dist_info() == (0, 1)
def _test_is_main_process_non_dist():
assert dist.is_main_process()
def _test_master_only_non_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def _test_barrier_non_dist():
dist.barrier() # nothing is done
def init_process(rank, world_size, functions, backend='gloo'):
"""Initialize the distributed environment."""
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29501'
os.environ['RANK'] = str(rank)
if backend == 'nccl':
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
torch_dist.init_process_group(
backend=backend, rank=rank, world_size=world_size)
dist.init_local_group(0, world_size)
for func in functions:
func()
def main(functions, world_size=2, backend='gloo'):
try:
mp.spawn(
init_process,
args=(world_size, functions, backend),
nprocs=world_size)
except Exception:
pytest.fail('error')
def _test_get_backend_dist():
assert dist.get_backend() == torch_dist.get_backend()
def _test_get_world_size_dist():
assert dist.get_world_size() == 2
def _test_get_rank_dist():
if torch_dist.get_rank() == 0:
assert dist.get_rank() == 0
else:
assert dist.get_rank() == 1
def _test_local_size_dist():
assert dist.get_local_size() == 2
def _test_local_rank_dist():
    assert torch_dist.get_rank(dist.get_local_group()) == dist.get_local_rank()
def _test_get_dist_info_dist():
if dist.get_rank() == 0:
assert dist.get_dist_info() == (0, 2)
else:
assert dist.get_dist_info() == (1, 2)
def _test_is_main_process_dist():
if dist.get_rank() == 0:
assert dist.is_main_process()
else:
assert not dist.is_main_process()
def _test_master_only_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def test_non_distributed_env():
_test_get_backend_non_dist()
_test_get_world_size_non_dist()
_test_get_rank_non_dist()
_test_local_size_non_dist()
_test_local_rank_non_dist()
_test_get_dist_info_non_dist()
_test_is_main_process_non_dist()
_test_master_only_non_dist()
_test_barrier_non_dist()
functions_to_test = [
_test_get_backend_dist,
_test_get_world_size_dist,
_test_get_rank_dist,
_test_local_size_dist,
_test_local_rank_dist,
_test_get_dist_info_dist,
_test_is_main_process_dist,
_test_master_only_dist,
]
def test_gloo_backend():
main(functions_to_test)
@pytest.mark.skipif(
torch.cuda.device_count() < 2, reason='need 2 gpu to test nccl')
def test_nccl_backend():
main(functions_to_test, backend='nccl')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.distributed as torch_dist
import torch.multiprocessing as mp
import mmengine.dist as dist
def _test_get_backend_non_dist():
assert dist.get_backend() is None
def _test_get_world_size_non_dist():
assert dist.get_world_size() == 1
def _test_get_rank_non_dist():
assert dist.get_rank() == 0
def _test_local_size_non_dist():
assert dist.get_local_size() == 1
def _test_local_rank_non_dist():
assert dist.get_local_rank() == 0
def _test_get_dist_info_non_dist():
assert dist.get_dist_info() == (0, 1)
def _test_is_main_process_non_dist():
assert dist.is_main_process()
def _test_master_only_non_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def _test_barrier_non_dist():
dist.barrier() # nothing is done
def init_process(rank, world_size, functions, backend='gloo'):
"""Initialize the distributed environment."""
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29501'
os.environ['RANK'] = str(rank)
dist.init_dist('pytorch', backend, rank=rank, world_size=world_size)
dist.init_local_group(0, world_size)
for func in functions:
func()
def main(functions, world_size=2, backend='gloo'):
try:
mp.spawn(
init_process,
args=(world_size, functions, backend),
nprocs=world_size)
except Exception:
pytest.fail('error')
def _test_get_backend_dist():
assert dist.get_backend() == torch_dist.get_backend()
def _test_get_world_size_dist():
assert dist.get_world_size() == 2
def _test_get_rank_dist():
if torch_dist.get_rank() == 0:
assert dist.get_rank() == 0
else:
assert dist.get_rank() == 1
def _test_local_size_dist():
assert dist.get_local_size() == 2
def _test_local_rank_dist():
    assert torch_dist.get_rank(dist.get_local_group()) == dist.get_local_rank()
def _test_get_dist_info_dist():
if dist.get_rank() == 0:
assert dist.get_dist_info() == (0, 2)
else:
assert dist.get_dist_info() == (1, 2)
def _test_is_main_process_dist():
if dist.get_rank() == 0:
assert dist.is_main_process()
else:
assert not dist.is_main_process()
def _test_master_only_dist():
@dist.master_only
def fun():
assert dist.get_rank() == 0
fun()
def test_non_distributed_env():
_test_get_backend_non_dist()
_test_get_world_size_non_dist()
_test_get_rank_non_dist()
_test_local_size_non_dist()
_test_local_rank_non_dist()
_test_get_dist_info_non_dist()
_test_is_main_process_non_dist()
_test_master_only_non_dist()
_test_barrier_non_dist()
functions_to_test = [
_test_get_backend_dist,
_test_get_world_size_dist,
_test_get_rank_dist,
_test_local_size_dist,
_test_local_rank_dist,
_test_get_dist_info_dist,
_test_is_main_process_dist,
_test_master_only_dist,
]
def test_gloo_backend():
main(functions_to_test)
@pytest.mark.skipif(
torch.cuda.device_count() < 2, reason='need 2 gpu to test nccl')
def test_nccl_backend():
main(functions_to_test, backend='nccl')
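A minimal sketch of the ``mp.spawn`` calling convention that ``main`` relies on: the worker receives the process index as its first argument, followed by the contents of ``args``, which is why ``init_process`` takes ``rank`` first. The worker body below is a stand-in, not the real test.
import torch.multiprocessing as mp


def worker(rank, world_size, message):
    # mp.spawn calls worker(i, *args) for i in range(nprocs).
    print(f'process {rank}/{world_size}: {message}')


if __name__ == '__main__':
    mp.spawn(worker, args=(2, 'hello'), nprocs=2)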
|
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.fireworks.utils import (
fireworks_modelname_to_contextsize,
is_function_calling_model,
)
from llama_index.llms.openai import OpenAI
DEFAULT_API_BASE = "https://api.fireworks.ai/inference/v1"
DEFAULT_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
class Fireworks(OpenAI):
"""
Fireworks LLM.
Examples:
`pip install llama-index-llms-fireworks`
```python
from llama_index.llms.fireworks import Fireworks
# Create an instance of the Fireworks class
llm = Fireworks(
model="accounts/fireworks/models/mixtral-8x7b-instruct",
api_key="YOUR_API_KEY"
)
# Call the complete method with a prompt
resp = llm.complete("Hello world!")
print(resp)
```
"""
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_base = get_from_param_or_env("api_base", api_base, "FIREWORKS_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "FIREWORKS_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
default_headers=default_headers,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "Fireworks_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=fireworks_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
is_function_calling_model=is_function_calling_model(
model=self._get_model_name()
),
)
@property
def _is_chat_model(self) -> bool:
return True
|
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.fireworks.utils import (
fireworks_modelname_to_contextsize,
is_function_calling_model,
)
from llama_index.llms.openai import OpenAI
DEFAULT_API_BASE = "https://api.fireworks.ai/inference/v1"
DEFAULT_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
class Fireworks(OpenAI):
"""Fireworks LLM.
Examples:
`pip install llama-index-llms-fireworks`
```python
from llama_index.llms.fireworks import Fireworks
# Create an instance of the Fireworks class
llm = Fireworks(
model="accounts/fireworks/models/mixtral-8x7b-instruct",
api_key="YOUR_API_KEY"
)
# Call the complete method with a prompt
resp = llm.complete("Hello world!")
print(resp)
```
"""
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_base = get_from_param_or_env("api_base", api_base, "FIREWORKS_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "FIREWORKS_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
default_headers=default_headers,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "Fireworks_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=fireworks_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
is_function_calling_model=is_function_calling_model(
model=self._get_model_name()
),
)
@property
def _is_chat_model(self) -> bool:
return True
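A sketch of the parameter-or-environment resolution used for ``api_base`` and ``api_key`` above, written as a local helper rather than the real ``get_from_param_or_env`` (whose exact behavior and signature should be checked in llama-index itself).
import os
from typing import Optional


def resolve(name: str, param: Optional[str], env_key: str) -> str:
    """Prefer the explicit argument and fall back to the environment variable."""
    value = param if param is not None else os.environ.get(env_key)
    if value is None:
        raise ValueError(f"Pass `{name}` or set the {env_key} environment variable.")
    return value


# e.g. api_key = resolve("api_key", None, "FIREWORKS_API_KEY")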
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # This if/else statement needs to be refactored; it is too long.
            # The check should be delegated to the type level.
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
mesh_url=Mesh3DUrl,
point_cloud_url=PointCloud3DUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_field_type(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> 'NodeProto':
        """Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to be
        converted into a protobuf.
        :return: the nested item protobuf message
        """
        from docarray.proto import NodeProto
return NodeProto(nested=self.to_protobuf())
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # This if/else statement needs to be refactored; it is too long.
            # The check should be delegated to the type level.
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
mesh_url=Mesh3DUrl,
point_cloud_url=PointCloud3DUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> 'NodeProto':
        """Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to be
        converted into a protobuf.
        :return: the nested item protobuf message
        """
        from docarray.proto import NodeProto
return NodeProto(nested=self.to_protobuf())
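A stripped-down sketch of the per-field dispatch performed in ``to_protobuf`` above, with a plain dict standing in for ``NodeProto`` so it runs without the docarray protobuf definitions; the ``BaseNode`` branch is omitted and the example values are made up.
from typing import Any, Dict


def serialize_field(value: Any) -> Dict[str, Any]:
    # Mirrors the type checks in to_protobuf: str -> text, bytes -> blob,
    # None -> empty node, anything else is rejected.
    if type(value) is str:
        return {'text': value}
    if type(value) is bytes:
        return {'blob': value}
    if value is None:
        return {}
    raise ValueError(f'value {value!r} is not supported')


print(serialize_field('hello'))  # {'text': 'hello'}
print(serialize_field(b'\x00'))  # {'blob': b'\x00'}
print(serialize_field(None))     # {}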
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # type: ignore[assignment,no-redef] # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.11"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.10"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|