input | output
---|---
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/training/overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/training/overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {"return_loss": True}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
|
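A minimal usage sketch for the collator above, reusing the `SentenceTransformerDataCollator` class just defined; the fake `tokenize_fn`, the `sentence1`/`sentence2` column names, and the sample rows are illustrative assumptions only:

```python
import torch

def fake_tokenize(texts):
    # stand-in for model.tokenize(): pad every text to length 4 with dummy ids
    ids = torch.zeros(len(texts), 4, dtype=torch.long)
    mask = torch.ones(len(texts), 4, dtype=torch.long)
    return {"input_ids": ids, "attention_mask": mask}

collator = SentenceTransformerDataCollator(tokenize_fn=fake_tokenize)
features = [
    {"sentence1": "A plane is taking off.", "sentence2": "An air plane is taking off.", "score": 1.0},
    {"sentence1": "A man is playing a flute.", "sentence2": "A man is eating pasta.", "score": 0.1},
]
batch = collator(features)
# batch holds "label" (taken from the "score" column) plus
# "sentence1_input_ids", "sentence1_attention_mask",
# "sentence2_input_ids" and "sentence2_attention_mask"
# (the second variant above additionally sets "return_loss": True)
```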
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import FCNMaskHead
class TestFCNMaskHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_get_seg_masks(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
self.skipTest('test requires GPU and torch+cuda')
num_classes = 6
mask_head = FCNMaskHead(
num_convs=1,
in_channels=1,
conv_out_channels=1,
num_classes=num_classes)
rcnn_test_cfg = ConfigDict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)
s = 128
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
result = InstanceData(metainfo=img_metas)
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# test with activate_map, `mask_pred` has been activated before
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
mask_pred = [m.sigmoid().detach() for m in mask_pred]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg,
activate_map=True)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# num_samples is 0
num_samples = 0
result = InstanceData(metainfo=img_metas)
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.zeros((num_samples, 4)).to(device)
result.labels = torch.zeros((num_samples, )).to(device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import FCNMaskHead
class TestFCNMaskHead(TestCase):
def test_init(self):
"""Test init standard RoI head."""
@parameterized.expand(['cpu', 'cuda'])
def test_get_seg_masks(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
self.skipTest('test requires GPU and torch+cuda')
num_classes = 6
mask_head = FCNMaskHead(
num_convs=1,
in_channels=1,
conv_out_channels=1,
num_classes=num_classes)
rcnn_test_cfg = ConfigDict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)
s = 128
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
result = InstanceData(metainfo=img_metas)
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
assert isinstance(result_list[0], InstanceData)
assert len(result_list[0]) == num_samples
assert result_list[0].masks.shape == (num_samples, s, s)
# num_samples is 0
num_samples = 0
result = InstanceData(metainfo=img_metas)
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.zeros((num_samples, 4)).to(device)
result.labels = torch.zeros((num_samples, )).to(device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
assert isinstance(result_list[0], InstanceData)
assert len(result_list[0]) == num_samples
assert result_list[0].masks.shape == (num_samples, s, s)
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ELU")
class ELU(Layer):
"""Applies an Exponential Linear Unit function to an output.
Formula:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
Args:
alpha: float, slope of negative section. Defaults to `1.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, alpha=1.0, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.supports_masking = True
self.built = True
def call(self, inputs):
return activations.elu(inputs, alpha=self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ELU")
class ELU(Layer):
"""Applies an Exponential Linear Unit function to an output.
Formula:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
Args:
alpha: float, slope of negative section. Defaults to `1.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, alpha=1.0, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.supports_masking = True
def call(self, inputs):
return activations.elu(inputs, alpha=self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
|
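A small NumPy check of the ELU formula from the docstring above; plain NumPy is used here instead of the Keras layer purely for illustration:

```python
import numpy as np

def elu(x, alpha=1.0):
    # alpha * (exp(x) - 1) for x < 0, identity for x >= 0
    return np.where(x < 0, alpha * (np.exp(x) - 1.0), x)

x = np.array([-2.0, -1.0, 0.0, 3.0])
print(elu(x))  # approximately [-0.8647, -0.6321, 0.0, 3.0]
```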
"""Tests for evaluation metrics."""
from typing import Dict, List
import numpy as np
import pytest
import xgboost as xgb
from xgboost.compat import concat
from xgboost.core import _parse_eval_str
def check_precision_score(tree_method: str) -> None:
"""Test for precision with ranking and classification."""
datasets = pytest.importorskip("sklearn.datasets")
X, y = datasets.make_classification(
n_samples=1024, n_features=4, n_classes=2, random_state=2023
)
qid = np.zeros(shape=y.shape) # same group
ltr = xgb.XGBRanker(n_estimators=2, tree_method=tree_method)
ltr.fit(X, y, qid=qid)
# re-generate so that XGBoost doesn't evaluate the result to 1.0
X, y = datasets.make_classification(
n_samples=512, n_features=4, n_classes=2, random_state=1994
)
ltr.set_params(eval_metric="pre@32")
result = _parse_eval_str(
ltr.get_booster().eval_set(evals=[(xgb.DMatrix(X, y), "Xy")])
)
score_0 = result[1][1]
X_list = []
y_list = []
n_query_groups = 3
q_list: List[np.ndarray] = []
for i in range(n_query_groups):
# same for all groups
X, y = datasets.make_classification(
n_samples=512, n_features=4, n_classes=2, random_state=1994
)
X_list.append(X)
y_list.append(y)
q = np.full(shape=y.shape, fill_value=i, dtype=np.uint64)
q_list.append(q)
qid = concat(q_list)
X = concat(X_list)
y = concat(y_list)
result = _parse_eval_str(
ltr.get_booster().eval_set(evals=[(xgb.DMatrix(X, y, qid=qid), "Xy")])
)
assert result[1][0].endswith("pre@32")
score_1 = result[1][1]
assert score_1 == score_0
def check_quantile_error(tree_method: str) -> None:
"""Test for the `quantile` loss."""
from sklearn.datasets import make_regression
from sklearn.metrics import mean_pinball_loss
rng = np.random.RandomState(19)
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_regression(128, 3, random_state=rng)
Xy = xgb.QuantileDMatrix(X, y)
evals_result: Dict[str, Dict] = {}
booster = xgb.train(
{"tree_method": tree_method, "eval_metric": "quantile", "quantile_alpha": 0.3},
Xy,
evals=[(Xy, "Train")],
evals_result=evals_result,
)
predt = booster.inplace_predict(X)
loss = mean_pinball_loss(y, predt, alpha=0.3)
np.testing.assert_allclose(evals_result["Train"]["quantile"][-1], loss)
|
"""Tests for evaluation metrics."""
from typing import Dict, List
import numpy as np
import pytest
import xgboost as xgb
from xgboost.compat import concat
from xgboost.core import _parse_eval_str
def check_precision_score(tree_method: str) -> None:
"""Test for precision with ranking and classification."""
datasets = pytest.importorskip("sklearn.datasets")
X, y = datasets.make_classification(
n_samples=1024, n_features=4, n_classes=2, random_state=2023
)
qid = np.zeros(shape=y.shape) # same group
ltr = xgb.XGBRanker(n_estimators=2, tree_method=tree_method)
ltr.fit(X, y, qid=qid)
# re-generate so that XGBoost doesn't evaluate the result to 1.0
X, y = datasets.make_classification(
n_samples=512, n_features=4, n_classes=2, random_state=1994
)
ltr.set_params(eval_metric="pre@32")
result = _parse_eval_str(
ltr.get_booster().eval_set(evals=[(xgb.DMatrix(X, y), "Xy")])
)
score_0 = result[1][1]
X_list = []
y_list = []
n_query_groups = 3
q_list: List[np.ndarray] = []
for i in range(n_query_groups):
# same for all groups
X, y = datasets.make_classification(
n_samples=512, n_features=4, n_classes=2, random_state=1994
)
X_list.append(X)
y_list.append(y)
q = np.full(shape=y.shape, fill_value=i, dtype=np.uint64)
q_list.append(q)
qid = concat(q_list)
X = concat(X_list)
y = concat(y_list)
result = _parse_eval_str(
ltr.get_booster().eval_set(evals=[(xgb.DMatrix(X, y, qid=qid), "Xy")])
)
assert result[1][0].endswith("pre@32")
score_1 = result[1][1]
assert score_1 == score_0
def check_quantile_error(tree_method: str) -> None:
"""Test for the `quantile` loss."""
from sklearn.datasets import make_regression
from sklearn.metrics import mean_pinball_loss
rng = np.random.RandomState(19)
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_regression(128, 3, random_state=rng)
Xy = xgb.QuantileDMatrix(X, y)
evals_result: Dict[str, Dict] = {}
booster = xgb.train(
{"tree_method": tree_method, "eval_metric": "quantile", "quantile_alpha": 0.3},
Xy,
evals=[(Xy, "Train")],
evals_result=evals_result,
)
predt = booster.inplace_predict(X)
loss = mean_pinball_loss(y, predt, alpha=0.3)
np.testing.assert_allclose(evals_result["Train"]["quantile"][-1], loss)
|
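The `quantile` eval metric that `check_quantile_error` compares against sklearn's `mean_pinball_loss` is the mean pinball loss. A standalone sketch of that loss on made-up numbers, with `alpha` as the target quantile:

```python
import numpy as np

def pinball_loss(y_true, y_pred, alpha):
    # alpha * (y - yhat) when under-predicting, (1 - alpha) * (yhat - y) otherwise
    diff = y_true - y_pred
    return float(np.mean(np.where(diff >= 0, alpha * diff, (alpha - 1) * diff)))

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 2.0, 2.0])
print(pinball_loss(y_true, y_pred, alpha=0.3))  # (0.7*0.5 + 0.3*0.0 + 0.3*1.0) / 3 ≈ 0.2167
```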
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
@nested_params([True, False])
def test_add_noise(self, use_lengths):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
if use_lengths:
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
else:
lengths = None
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, snr, lengths))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.preemphasis, (waveform, coeff))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.deemphasis, (waveform, coeff))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.frequency_impulse_response, (mags,))
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.preemphasis, (waveform, coeff))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.deemphasis, (waveform, coeff))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.frequency_impulse_response, (mags,))
|
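A minimal, self-contained illustration of the consistency pattern used by `_assert_consistency` above: script a function with TorchScript and check it against eager execution. The `scale_and_add` function is a made-up stand-in for the torchaudio prototype functions:

```python
import torch

def scale_and_add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # trivial eager function that TorchScript can compile from type annotations
    return 2.0 * x + y

scripted = torch.jit.script(scale_and_add)
x, y = torch.rand(3), torch.rand(3)
assert torch.equal(scale_and_add(x, y), scripted(x, y))
```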
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_similarity_metric import (
AnswerSimilarityMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerSimilarityEvaluator(BaseEvaluator):
"""
Tonic Validate's answer similarity metric.
The output score is a float between 0.0 and 5.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerSimilarityMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference_response: Optional[str] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=reference_response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_similarity_metric import (
AnswerSimilarityMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerSimilarityEvaluator(BaseEvaluator):
"""Tonic Validate's answer similarity metric.
The output score is a float between 0.0 and 5.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerSimilarityMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference_response: Optional[str] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=reference_response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
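A hypothetical usage sketch for the evaluator above; the question, answers, and contexts are placeholders, and a configured OpenAI API key is assumed:

```python
import asyncio

evaluator = AnswerSimilarityEvaluator()
result = asyncio.run(
    evaluator.aevaluate(
        query="What is the capital of France?",
        response="Paris is the capital of France.",
        contexts=["Paris is the capital and largest city of France."],
        reference_response="Paris",
    )
)
print(result.score)  # a float between 0.0 and 5.0
```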
from backend.app import run_processes
from backend.executor import DatabaseManager
from backend.notifications.notifications import NotificationManager
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
NotificationManager(),
DatabaseManager(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
from backend.app import run_processes
from backend.executor import DatabaseManager, Scheduler
from backend.notifications.notifications import NotificationManager
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
NotificationManager(),
DatabaseManager(),
Scheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
"""Utilities to render tools."""
from __future__ import annotations
from inspect import signature
from typing import Callable
from langchain_core.tools.base import BaseTool
ToolsRenderer = Callable[[list[BaseTool]], str]
def render_text_description(tools: list[BaseTool]) -> str:
"""Render the tool name and description in plain text.
Args:
tools: The tools to render.
Returns:
The rendered text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search
calculator: This tool is used for math
"""
descriptions = []
for tool in tools:
if hasattr(tool, "func") and tool.func:
sig = signature(tool.func)
description = f"{tool.name}{sig} - {tool.description}"
else:
description = f"{tool.name} - {tool.description}"
descriptions.append(description)
return "\n".join(descriptions)
def render_text_description_and_args(tools: list[BaseTool]) -> str:
"""Render the tool name, description, and args in plain text.
Args:
tools: The tools to render.
Returns:
The rendered text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \
args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
args_schema = str(tool.args)
if hasattr(tool, "func") and tool.func:
sig = signature(tool.func)
description = f"{tool.name}{sig} - {tool.description}"
else:
description = f"{tool.name} - {tool.description}"
tool_strings.append(f"{description}, args: {args_schema}")
return "\n".join(tool_strings)
|
from __future__ import annotations
from inspect import signature
from typing import Callable
from langchain_core.tools.base import BaseTool
ToolsRenderer = Callable[[list[BaseTool]], str]
def render_text_description(tools: list[BaseTool]) -> str:
"""Render the tool name and description in plain text.
Args:
tools: The tools to render.
Returns:
The rendered text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search
calculator: This tool is used for math
"""
descriptions = []
for tool in tools:
if hasattr(tool, "func") and tool.func:
sig = signature(tool.func)
description = f"{tool.name}{sig} - {tool.description}"
else:
description = f"{tool.name} - {tool.description}"
descriptions.append(description)
return "\n".join(descriptions)
def render_text_description_and_args(tools: list[BaseTool]) -> str:
"""Render the tool name, description, and args in plain text.
Args:
tools: The tools to render.
Returns:
The rendered text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \
args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
args_schema = str(tool.args)
if hasattr(tool, "func") and tool.func:
sig = signature(tool.func)
description = f"{tool.name}{sig} - {tool.description}"
else:
description = f"{tool.name} - {tool.description}"
tool_strings.append(f"{description}, args: {args_schema}")
return "\n".join(tool_strings)
|
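A brief usage sketch for the renderers above. The `@tool`-decorated `search` function is a made-up example, and `langchain_core.tools.tool` is assumed to be importable alongside `BaseTool`:

```python
from langchain_core.tools import tool

@tool
def search(query: str) -> str:
    """This tool is used for search"""
    return f"results for {query}"

print(render_text_description([search]))
# roughly: search(query: str) -> str - This tool is used for search
print(render_text_description_and_args([search]))
# roughly: ..., args: {'query': {'title': 'Query', 'type': 'string'}}
```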
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS (seems to have no effect; the variable must be exported manually before starting Jina)
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
f'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.17.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS (seems to have no effect; the variable must be exported manually before starting Jina)
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
f'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.17.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .openimages_metric import OpenImagesMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
__all__ = ['CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric']
|
"""Module contains a few fake embedding models for testing purposes."""
# Please do not add additional fake embedding model implementations here.
import hashlib
from pydantic import BaseModel
from typing_extensions import override
from langchain_core.embeddings import Embeddings
class FakeEmbeddings(Embeddings, BaseModel):
"""Fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import FakeEmbeddings
embed = FakeEmbeddings(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self) -> list[float]:
import numpy as np # type: ignore[import-not-found, import-untyped]
return list(np.random.default_rng().normal(size=self.size))
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding() for _ in texts]
@override
def embed_query(self, text: str) -> list[float]:
return self._get_embedding()
class DeterministicFakeEmbedding(Embeddings, BaseModel):
"""Deterministic fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import DeterministicFakeEmbedding
embed = DeterministicFakeEmbedding(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self, seed: int) -> list[float]:
import numpy as np # type: ignore[import-not-found, import-untyped]
# set the seed for the random generator
rng = np.random.default_rng(seed)
return list(rng.normal(size=self.size))
def _get_seed(self, text: str) -> int:
"""Get a seed for the random generator, using the hash of the text."""
return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
@override
def embed_query(self, text: str) -> list[float]:
return self._get_embedding(seed=self._get_seed(text))
|
"""Module contains a few fake embedding models for testing purposes."""
# Please do not add additional fake embedding model implementations here.
import hashlib
from pydantic import BaseModel
from langchain_core.embeddings import Embeddings
class FakeEmbeddings(Embeddings, BaseModel):
"""Fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import FakeEmbeddings
embed = FakeEmbeddings(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self) -> list[float]:
import numpy as np # type: ignore[import-not-found, import-untyped]
return list(np.random.default_rng().normal(size=self.size))
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding() for _ in texts]
def embed_query(self, text: str) -> list[float]:
return self._get_embedding()
class DeterministicFakeEmbedding(Embeddings, BaseModel):
"""Deterministic fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
.. code-block:: python
from langchain_core.embeddings import DeterministicFakeEmbedding
embed = DeterministicFakeEmbedding(size=100)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self, seed: int) -> list[float]:
import numpy as np # type: ignore[import-not-found, import-untyped]
# set the seed for the random generator
rng = np.random.default_rng(seed)
return list(rng.normal(size=self.size))
def _get_seed(self, text: str) -> int:
"""Get a seed for the random generator, using the hash of the text."""
return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
def embed_query(self, text: str) -> list[float]:
return self._get_embedding(seed=self._get_seed(text))
|
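A short sketch of the determinism property described above: because the RNG seed is derived from a SHA-256 hash of the text, identical inputs always map to identical vectors. The size of 8 is arbitrary:

```python
embed = DeterministicFakeEmbedding(size=8)
v1 = embed.embed_query("hello world")
v2 = embed.embed_query("hello world")
v3 = embed.embed_query("another text")
assert v1 == v2   # identical text -> identical embedding
assert v1 != v3   # different text -> (almost surely) a different embedding
```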
import time
import uuid
from contextlib import contextmanager
from pathlib import Path
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id):
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
"""Question-answering with sources over an index."""
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over an index."""
retriever: BaseRetriever = Field(exclude=True)
"""Index to connect to."""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.retriever.invoke(
question,
config={"callbacks": run_manager.get_child()},
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = await self.retriever.ainvoke(
question,
config={"callbacks": run_manager.get_child()},
)
return self._reduce_tokens_below_limit(docs)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa_with_sources_chain"
|
"""Question-answering with sources over an index."""
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over an index."""
retriever: BaseRetriever = Field(exclude=True)
"""Index to connect to."""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> list[Document]:
question = inputs[self.question_key]
docs = self.retriever.invoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> list[Document]:
question = inputs[self.question_key]
docs = await self.retriever.ainvoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa_with_sources_chain"
|
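A standalone sketch of the trimming logic in `_reduce_tokens_below_limit` above, using made-up token counts instead of a real tokenizer:

```python
def reduce_below_limit(docs, token_counts, max_tokens):
    # drop trailing docs until the running token total fits under the limit
    num_docs = len(docs)
    total = sum(token_counts[:num_docs])
    while total > max_tokens and num_docs > 0:
        num_docs -= 1
        total -= token_counts[num_docs]
    return docs[:num_docs]

docs = ["doc-a", "doc-b", "doc-c"]
print(reduce_below_limit(docs, [1500, 1500, 1500], max_tokens=3375))
# -> ['doc-a', 'doc-b']  (4500 tokens exceeds the limit, so the last doc is dropped)
```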
"""
This example demonstrates the setup for Question-Answer Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import gzip
import json
import os
import time
import torch
from sentence_transformers import SentenceTransformer, util
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = "nq-distilbert-base-v1"
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = "data/simplewiki-2020-11-01.jsonl.gz"
if not os.path.exists(wikipedia_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz", wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data["paragraphs"]:
# We encode the passages as [title, text]
passages.append([data["title"], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == "nq-distilbert-base-v1":
embeddings_filepath = "simplewiki-2020-11-01-nq-distilbert-base-v1.pt"
if not os.path.exists(embeddings_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt", embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float
device = util.get_device_name()
corpus_embeddings = corpus_embeddings.to(device)
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print(f"Results (after {end_time - start_time:.3f} seconds):")
for hit in hits:
print("\t{:.3f}\t{}".format(hit["score"], passages[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As the model, we use nq-distilbert-base-v1.
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import gzip
import json
import os
import time
import torch
from sentence_transformers import SentenceTransformer, util
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = "nq-distilbert-base-v1"
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = "data/simplewiki-2020-11-01.jsonl.gz"
if not os.path.exists(wikipedia_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz", wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data["paragraphs"]:
# We encode the passages as [title, text]
passages.append([data["title"], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == "nq-distilbert-base-v1":
embeddings_filepath = "simplewiki-2020-11-01-nq-distilbert-base-v1.pt"
if not os.path.exists(embeddings_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt", embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float
device = util.get_device_name()
corpus_embeddings = corpus_embeddings.to(device)
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits:
print("\t{:.3f}\t{}".format(hit["score"], passages[hit["corpus_id"]]))
print("\n\n========\n")
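# ---------------------------------------------------------------------------
# For reference (a sketch, not part of the script): `util.semantic_search`
# returns one result list per query, and each hit is a dict holding the index
# of the matched passage and its similarity score, roughly of the form
#
# hits = [
#     [
#         {"corpus_id": 12345, "score": 0.71},   # best match for the first query
#         {"corpus_id": 67890, "score": 0.64},
#         ...                                    # up to top_k entries
#     ]
# ]
#
# which is why the script indexes `hits[0]` and then reads `hit["score"]`
# and `passages[hit["corpus_id"]]` in the print loop above.
# ---------------------------------------------------------------------------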
|
from jina import Executor, Flow, requests, DocumentArray
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
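    # Each gateway request metric should record data points for both executors,
    # and every data point should carry the 'address' and 'deployment' attributes.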
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0][
'attributes'
]
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0][
'attributes'
]
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1][
'attributes'
]
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1][
'attributes'
]
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
from jina import Executor, Flow, requests, DocumentArray
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0][
'attributes'
]
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0][
'attributes'
]
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1][
'attributes'
]
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1][
'attributes'
]
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1][
'attributes'
]
)
assert (
gateway_metric_data_point['jina_received_response_bytes'][0]['attributes'][
'deployment'
]
== 'first_exec'
)
assert (
gateway_metric_data_point['jina_received_response_bytes'][1]['attributes'][
'deployment'
]
== 'second_exec'
)
|
import json
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetChannel(SlackBaseTool):
"""Tool that gets Slack channel information."""
name: str = "get_channelid_name_dict"
description: str = (
"Use this tool to get channelid-name dict. There is no input to this tool"
)
def _run(
self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
try:
logging.getLogger(__name__)
result = self.client.conversations_list()
channels = result["channels"]
filtered_result = [
{key: channel[key] for key in ("id", "name", "created", "num_members")}
for channel in channels
if "id" in channel
and "name" in channel
and "created" in channel
and "num_members" in channel
]
return json.dumps(filtered_result, ensure_ascii=False)
except Exception as e:
return "Error creating conversation: {}".format(e)
|
import json
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetChannel(SlackBaseTool): # type: ignore[override]
"""Tool that gets Slack channel information."""
name: str = "get_channelid_name_dict"
description: str = (
"Use this tool to get channelid-name dict. There is no input to this tool"
)
def _run(
self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
try:
logging.getLogger(__name__)
result = self.client.conversations_list()
channels = result["channels"]
filtered_result = [
{key: channel[key] for key in ("id", "name", "created", "num_members")}
for channel in channels
if "id" in channel
and "name" in channel
and "created" in channel
and "num_members" in channel
]
return json.dumps(filtered_result, ensure_ascii=False)
except Exception as e:
return "Error creating conversation: {}".format(e)
|
from unittest.mock import MagicMock
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.tools import FunctionTool
from llama_index.llms.oci_genai import OCIGenAI
def test_oci_genai_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIGenAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
# Shared test tool for tool_required tests
def search(query: str) -> str:
"""Search for information about a query."""
return f"Results for {query}"
search_tool = FunctionTool.from_defaults(
fn=search, name="search_tool", description="A tool for searching information"
)
def test_prepare_chat_with_tools_tool_required():
"""Test that tool_required is correctly passed to the API request when True."""
# Mock the client to avoid authentication issues
mock_client = MagicMock()
llm = OCIGenAI(
model="cohere.command-r-16k",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="test_compartment_id",
client=mock_client,
)
# Test with tool_required=True
result = llm._prepare_chat_with_tools(
tools=[search_tool], user_msg="Test message", tool_required=True
)
assert result["tool_choice"] == "REQUIRED"
assert len(result["tools"]) == 1
# FunctionTool objects don't have a name attribute directly
assert result["tools"][0].metadata.name == "search_tool"
def test_prepare_chat_with_tools_tool_not_required():
"""Test that tool_required is correctly passed to the API request when False."""
# Mock the client to avoid authentication issues
mock_client = MagicMock()
llm = OCIGenAI(
model="cohere.command-r-16k",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="test_compartment_id",
client=mock_client,
)
# Test with tool_required=False (default)
result = llm._prepare_chat_with_tools(
tools=[search_tool],
user_msg="Test message",
)
# When tool_required is False, tool_choice should not be included
assert "tool_choice" not in result
assert len(result["tools"]) == 1
# FunctionTool objects don't have a name attribute directly
assert result["tools"][0].metadata.name == "search_tool"
|
from unittest.mock import MagicMock
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.tools import FunctionTool
from llama_index.llms.oci_genai import OCIGenAI
def test_oci_genai_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIGenAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
# Shared test tool for tool_required tests
def search(query: str) -> str:
"""Search for information about a query."""
return f"Results for {query}"
search_tool = FunctionTool.from_defaults(
fn=search, name="search_tool", description="A tool for searching information"
)
def test_prepare_chat_with_tools_tool_required():
"""Test that tool_required is correctly passed to the API request when True."""
# Mock the client to avoid authentication issues
mock_client = MagicMock()
llm = OCIGenAI(
model="cohere.command-r-16k",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="test_compartment_id",
client=mock_client,
)
# Test with tool_required=True
result = llm._prepare_chat_with_tools(
tools=[search_tool], user_msg="Test message", tool_required=True
)
assert result["tool_choice"] == "REQUIRED"
assert len(result["tools"]) == 1
# CohereTool objects have a `name` attribute directly
assert result["tools"][0].name == "search_tool"
def test_prepare_chat_with_tools_tool_not_required():
"""Test that tool_required is correctly passed to the API request when False."""
# Mock the client to avoid authentication issues
mock_client = MagicMock()
llm = OCIGenAI(
model="cohere.command-r-16k",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="test_compartment_id",
client=mock_client,
)
# Test with tool_required=False (default)
result = llm._prepare_chat_with_tools(
tools=[search_tool],
user_msg="Test message",
)
# When tool_required is False, tool_choice should not be included
assert "tool_choice" not in result
assert len(result["tools"]) == 1
# CohereTool objects have a `name` attribute directly
assert result["tools"][0].name == "search_tool"
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/advanced_tutorials/registry.html.
"""
from .build_functions import (build_model_from_cfg, build_runner_from_cfg,
build_scheduler_from_cfg)
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', build_func=build_scheduler_from_cfg)
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
# manage inferencer
INFERENCERS = Registry('inferencer')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .build_functions import (build_model_from_cfg, build_runner_from_cfg,
build_scheduler_from_cfg)
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', build_func=build_scheduler_from_cfg)
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
# manage inferencer
INFERENCERS = Registry('inferencer')
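# ---------------------------------------------------------------------------
# A minimal usage sketch (an illustration, not part of this module): a module
# registers itself into one of the root registries and is later built from a
# config dict. The class and config below are hypothetical.
#
# from torch import nn
#
# @MODELS.register_module()
# class TinyHead(nn.Module):
#     def __init__(self, num_classes: int = 10):
#         super().__init__()
#         self.fc = nn.Linear(16, num_classes)
#
# head = MODELS.build(dict(type='TinyHead', num_classes=5))
# ---------------------------------------------------------------------------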
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import (
MetadataMode,
Node,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""
Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.get_name(),
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.get_name(),
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import (
MetadataMode,
Node,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""
Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
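# ---------------------------------------------------------------------------
# A hedged usage sketch (an illustration, not part of the module): given any
# BaseRetriever, the tool can be built via `from_defaults` and called with
# positional or keyword arguments, which are joined into the query string that
# is passed to the retriever. `my_retriever` is assumed to exist (e.g. built
# from an index via `index.as_retriever()`).
#
# tool = RetrieverTool.from_defaults(
#     retriever=my_retriever,
#     name="docs_retriever",
#     description="Fetches passages relevant to a natural-language question.",
# )
# output = tool.call("How does the retriever tool join its inputs?")
# print(output.content)      # concatenated node contents
# print(output.raw_output)   # the underlying List[NodeWithScore]
# ---------------------------------------------------------------------------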
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
def __init__(self, features=None, **torch_tensor_kwargs):
super().__init__(features=features)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
# Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
# np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss.
if value.dtype in [np.uint16, np.uint32]:
value = value.astype(np.int64)
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
if value.ndim == 2:
value = value[:, :, np.newaxis]
value = value.transpose((2, 0, 1))
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import torch
# support for torch, tf, jax etc.
if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
def __init__(self, features=None, **torch_tensor_kwargs):
super().__init__(features=features)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
# Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
# np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss.
if value.dtype in [np.uint16, np.uint32]:
value = value.astype(np.int64)
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import torch
# support for torch, tf, jax etc.
if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
from typing import Any, Dict, Union
import torch
from torchvision import transforms as _transforms
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return datapoints.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertDtype(Transform):
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints.TensorImageType, datapoints.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBoxes(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
from typing import Any, Dict, Union
import torch
from torchvision import transforms as _transforms
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return datapoints.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertDtype(Transform):
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints.TensorImageType, datapoints.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBoxes(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return datapoints.BoundingBox.wrap_like(inpt, output)
|
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity = get_current_verbosity()
assert old_verbosity == 1
with xgb.config_context(verbosity=verbosity_level):
new_verbosity = get_current_verbosity()
assert new_verbosity == verbosity_level
assert old_verbosity == get_current_verbosity()
@pytest.mark.parametrize("use_rmm", [False, True])
def test_global_config_use_rmm(use_rmm):
def get_current_use_rmm_flag():
return xgb.get_config()["use_rmm"]
old_use_rmm_flag = get_current_use_rmm_flag()
with xgb.config_context(use_rmm=use_rmm):
new_use_rmm_flag = get_current_use_rmm_flag()
assert new_use_rmm_flag == use_rmm
assert old_use_rmm_flag == get_current_use_rmm_flag()
def test_nested_config() -> None:
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=2):
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=1):
assert xgb.get_config()["verbosity"] == 1
assert xgb.get_config()["verbosity"] == 2
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=None):
assert xgb.get_config()["verbosity"] == 3 # None has no effect
xgb.set_config(verbosity=2)
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
xgb.set_config(verbosity=verbosity) # reset
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
def test_thread_safety():
n_threads = multiprocessing.cpu_count()
futures = []
with ThreadPoolExecutor(max_workers=n_threads) as executor:
for i in range(256):
f = executor.submit(test_nested_config)
futures.append(f)
for f in futures:
f.result()
|
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity = get_current_verbosity()
assert old_verbosity == 1
with xgb.config_context(verbosity=verbosity_level):
new_verbosity = get_current_verbosity()
assert new_verbosity == verbosity_level
assert old_verbosity == get_current_verbosity()
@pytest.mark.parametrize("use_rmm", [False, True])
def test_global_config_use_rmm(use_rmm):
def get_current_use_rmm_flag():
return xgb.get_config()["use_rmm"]
old_use_rmm_flag = get_current_use_rmm_flag()
with xgb.config_context(use_rmm=use_rmm):
new_use_rmm_flag = get_current_use_rmm_flag()
assert new_use_rmm_flag == use_rmm
assert old_use_rmm_flag == get_current_use_rmm_flag()
def test_nested_config() -> None:
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=2):
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=1):
assert xgb.get_config()["verbosity"] == 1
assert xgb.get_config()["verbosity"] == 2
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=None):
assert xgb.get_config()["verbosity"] == 3 # None has no effect
xgb.set_config(verbosity=2)
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
xgb.set_config(verbosity=verbosity) # reset
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
def test_thread_safety():
n_threads = multiprocessing.cpu_count()
futures = []
with ThreadPoolExecutor(max_workers=n_threads) as executor:
for i in range(256):
f = executor.submit(test_nested_config)
futures.append(f)
for f in futures:
f.result()
|
"""Test embedding model integration."""
from typing import Any
from unittest.mock import patch
from langchain_ollama.embeddings import OllamaEmbeddings
MODEL_NAME = "llama3.1"
def test_initialization() -> None:
"""Test embedding model initialization."""
OllamaEmbeddings(model="llama3", keep_alive=1)
@patch("langchain_ollama.embeddings.validate_model")
def test_validate_model_on_init(mock_validate_model: Any) -> None:
"""Test that the model is validated on initialization when requested."""
# Test that validate_model is called when validate_model_on_init=True
OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=True)
mock_validate_model.assert_called_once()
mock_validate_model.reset_mock()
# Test that validate_model is NOT called when validate_model_on_init=False
OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=False)
mock_validate_model.assert_not_called()
# Test that validate_model is NOT called by default
OllamaEmbeddings(model=MODEL_NAME)
mock_validate_model.assert_not_called()
|
"""Test embedding model integration."""
from langchain_ollama.embeddings import OllamaEmbeddings
def test_initialization() -> None:
"""Test embedding model initialization."""
OllamaEmbeddings(model="llama3", keep_alive=1)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AINetworkToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AINetworkToolkit",
]
|
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa F401
__all__.append('TorchEmbedding')
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.embedding.tensorflow import ( # noqa F401
TensorFlowEmbedding,
)
__all__.append('TensorFlowEmbedding')
|
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa F401
__all__.append('TorchEmbedding')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import add_dump_metric, find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'add_dump_metric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
from mmdet.registry import METRICS
@METRICS.register_module()
class DumpProposals(BaseMetric):
"""Dump proposals pseudo metric.
Args:
output_dir (str): The root directory for ``proposals_file``.
Defaults to ''.
proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.
num_max_proposals (int, optional): Maximum number of proposals to dump.
If not specified, all proposals will be dumped.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'dump_proposals'
def __init__(self,
output_dir: str = '',
proposals_file: str = 'proposals.pkl',
num_max_proposals: Optional[int] = None,
file_client_args: dict = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.num_max_proposals = num_max_proposals
# TODO: update after mmengine finish refactor fileio.
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
                'please use `backend_args` instead. Please refer to '
                'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py'  # noqa: E501
)
self.output_dir = output_dir
assert proposals_file.endswith(('.pkl', '.pickle')), \
'The output file must be a pkl file.'
self.proposals_file = os.path.join(self.output_dir, proposals_file)
if is_main_process():
os.makedirs(self.output_dir, exist_ok=True)
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
# `bboxes` is sorted by `scores`
ranked_scores, rank_inds = pred['scores'].sort(descending=True)
ranked_bboxes = pred['bboxes'][rank_inds, :]
ranked_bboxes = ranked_bboxes.cpu().numpy()
ranked_scores = ranked_scores.cpu().numpy()
pred_instance = InstanceData()
pred_instance.bboxes = ranked_bboxes
pred_instance.scores = ranked_scores
if self.num_max_proposals is not None:
pred_instance = pred_instance[:self.num_max_proposals]
img_path = data_sample['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
result = {file_name: pred_instance}
self.results.append(result)
def compute_metrics(self, results: list) -> dict:
"""Dump the processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: An empty dict.
"""
logger: MMLogger = MMLogger.get_current_instance()
dump_results = {}
for result in results:
dump_results.update(result)
dump(
dump_results,
file=self.proposals_file,
backend_args=self.backend_args)
logger.info(f'Results are saved at {self.proposals_file}')
return {}
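# --- Usage sketch (editor's addition, not part of the original file) ---
# A minimal, assumed mmdet-style config snippet showing how this pseudo metric
# could be plugged in as an evaluator to dump proposals; the paths below are
# illustrative only.
test_evaluator = dict(
    type='DumpProposals',
    output_dir='work_dirs/proposals',
    proposals_file='rpn_proposals.pkl')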
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
from mmdet.registry import METRICS
@METRICS.register_module()
class DumpProposals(BaseMetric):
"""Dump proposals pseudo metric.
Args:
output_dir (str): The root directory for ``proposals_file``.
Defaults to ''.
proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.
num_max_proposals (int, optional): Maximum number of proposals to dump.
If not specified, all proposals will be dumped.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmengine.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'dump_proposals'
def __init__(self,
output_dir: str = '',
proposals_file: str = 'proposals.pkl',
num_max_proposals: Optional[int] = None,
file_client_args: dict = dict(backend='disk'),
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.num_max_proposals = num_max_proposals
# TODO: update after mmengine finish refactor fileio.
self.file_client_args = file_client_args
self.output_dir = output_dir
assert proposals_file.endswith(('.pkl', '.pickle')), \
'The output file must be a pkl file.'
self.proposals_file = os.path.join(self.output_dir, proposals_file)
if is_main_process():
os.makedirs(self.output_dir, exist_ok=True)
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
# `bboxes` is sorted by `scores`
ranked_scores, rank_inds = pred['scores'].sort(descending=True)
ranked_bboxes = pred['bboxes'][rank_inds, :]
ranked_bboxes = ranked_bboxes.cpu().numpy()
ranked_scores = ranked_scores.cpu().numpy()
pred_instance = InstanceData()
pred_instance.bboxes = ranked_bboxes
pred_instance.scores = ranked_scores
if self.num_max_proposals is not None:
pred_instance = pred_instance[:self.num_max_proposals]
img_path = data_sample['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
result = {file_name: pred_instance}
self.results.append(result)
def compute_metrics(self, results: list) -> dict:
"""Dump the processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: An empty dict.
"""
logger: MMLogger = MMLogger.get_current_instance()
dump_results = {}
for result in results:
dump_results.update(result)
dump(
dump_results,
file=self.proposals_file,
file_client_args=self.file_client_args)
logger.info(f'Results are saved at {self.proposals_file}')
return {}
|
import random
from typing import Optional, TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from docarray.array.document import DocumentArray
class SampleMixin:
"""A mixin that provides search functionality to DocumentArrays"""
def sample(self, k: int, seed: Optional[int] = None) -> 'DocumentArray':
"""random sample k elements from :class:`DocumentArray` without replacement.
:param k: Number of elements to sample from the document array.
:param seed: initialize the random number generator, by default is None. If set will
save the state of the random function to produce certain outputs.
:return: A sampled list of :class:`Document` represented as :class:`DocumentArray`.
"""
if seed is not None:
random.seed(seed)
        # NOTE: this could be simplified to random.sample(self, k) without
        # collecting indices, itemgetter, etc.; however, that only works
        # directly on a DocumentArray.
sampled = random.sample(self, k)
from docarray.array.document import DocumentArray
return DocumentArray(sampled)
def shuffle(self, seed: Optional[int] = None) -> 'DocumentArray':
"""Randomly shuffle documents within the :class:`DocumentArray`.
        :param seed: initializes the random number generator; defaults to None. If set,
            the shuffle is deterministic and reproducible.
:return: The shuffled list of :class:`Document` represented as :class:`DocumentArray`.
"""
return self.sample(len(self), seed=seed)
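# --- Usage sketch (editor's addition, not part of the original mixin) ---
# Illustrates the sample/shuffle API on a small DocumentArray (docarray <0.30
# style API); the seed makes both calls reproducible.
from docarray import Document, DocumentArray

da = DocumentArray([Document(text=str(i)) for i in range(10)])
subset = da.sample(k=3, seed=42)   # 3 documents, deterministic for a fixed seed
shuffled = da.shuffle(seed=42)     # all 10 documents in a random order
assert len(subset) == 3 and len(shuffled) == 10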
|
import random
from typing import Optional, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.array.document import DocumentArray
class SampleMixin:
"""A mixin that provides search functionality to DocumentArrays"""
def sample(self, k: int, seed: Optional[int] = None) -> 'DocumentArray':
"""random sample k elements from :class:`DocumentArray` without replacement.
:param k: Number of elements to sample from the document array.
:param seed: initialize the random number generator, by default is None. If set will
save the state of the random function to produce certain outputs.
:return: A sampled list of :class:`Document` represented as :class:`DocumentArray`.
"""
if seed is not None:
random.seed(seed)
        # NOTE: this could be simplified to random.sample(self, k) without
        # collecting indices, itemgetter, etc.; however, that only works
        # directly on a DocumentArray.
sampled = random.sample(self, k)
from docarray.array.document import DocumentArray
return DocumentArray(sampled)
def shuffle(self, seed: Optional[int] = None) -> 'DocumentArray':
"""Randomly shuffle documents within the :class:`DocumentArray`.
        :param seed: initializes the random number generator; defaults to None. If set,
            the shuffle is deterministic and reproducible.
:return: The shuffled list of :class:`Document` represented as :class:`DocumentArray`.
"""
return self.sample(len(self), seed=seed)
|
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, TypeVar
import PIL.Image
import torch
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
from torchvision.tv_tensors._tv_tensor import TVTensor
D = TypeVar("D", bound="EncodedData")
class EncodedData(TVTensor):
@classmethod
def _wrap(cls: type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: torch.dtype | None = None,
device: torch.device | str | int | None = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: type[D], path: str | os.PathLike, **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> tuple[int, int]:
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
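# --- Usage sketch (editor's addition, not part of the original prototype code) ---
# Wraps an image file as raw encoded bytes without decoding it; the file path
# is hypothetical, and the API is the prototype one defined above.
encoded = EncodedImage.from_path("example.jpg")  # uint8 tensor of raw bytes
height, width = encoded.spatial_size             # read lazily from the PIL header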
|
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
from torchvision.tv_tensors._tv_tensor import TVTensor
D = TypeVar("D", bound="EncodedData")
class EncodedData(TVTensor):
@classmethod
def _wrap(cls: Type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: Type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> Tuple[int, int]:
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
|
import logging
import os
import sys
from torchaudio._internal.module_utils import eval_env, fail_with_message, is_module_available, no_op
try:
from .fb import _init_ffmpeg
except ImportError:
from .utils import _init_ffmpeg
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
_USE_SOX = False if os.name == "nt" else eval_env("TORCHAUDIO_USE_SOX", True)
_SOX_MODULE_AVAILABLE = is_module_available("torchaudio.lib._torchaudio_sox")
if _USE_SOX and _SOX_MODULE_AVAILABLE:
_init_sox()
_SOX_INITIALIZED = True
if os.name == "nt":
fail_if_no_sox = fail_with_message("requires sox extension, which is not supported on Windows.")
elif not _USE_SOX:
fail_if_no_sox = fail_with_message("requires sox extension, but it is disabled. (TORCHAUDIO_USE_SOX=0)")
elif not _SOX_MODULE_AVAILABLE:
fail_if_no_sox = fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. "
"Please build TorchAudio with libsox support. (BUILD_SOX=1)"
)
else:
fail_if_no_sox = no_op
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
_USE_FFMPEG = eval_env("TORCHAUDIO_USE_FFMPEG", True)
if _USE_FFMPEG and _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
        # Since the rest of torchaudio works without it, we do not report the
        # error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
if _USE_FFMPEG:
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
else:
fail_if_no_ffmpeg = fail_with_message("requires ffmpeg extension, but it is disabled. (TORCHAUDIO_USE_FFMPEG=0)")
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
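# --- Usage sketch (editor's addition, not part of the original file) ---
# Illustrates how the guards defined above can be attached to a feature: the
# decorator is a no-op when the extension is available and raises a descriptive
# RuntimeError otherwise. The function below is hypothetical.
@fail_if_no_sox
def _apply_sox_effects_example(path: str):
    # A real implementation would call into torchaudio.lib._torchaudio_sox here.
    ...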
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
try:
from .fb import _init_ffmpeg
except ImportError:
from .utils import _init_ffmpeg
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
if _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
        # Since the rest of torchaudio works without it, we do not report the
        # error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
__version__ = '0.14.4'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.3'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from base64 import b64encode
from urllib.parse import urlencode
from backend.data.model import OAuth2Credentials
from backend.util.request import requests
from .base import BaseOAuthHandler
class NotionOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at https://developers.notion.com/docs/authorization
Notes:
- Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
- Notion doesn't use scopes
"""
PROVIDER_NAME = "notion"
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
self.token_url = "https://api.notion.com/v1/oauth/token"
def get_login_url(self, scopes: list[str], state: str) -> str:
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_uri,
"response_type": "code",
"owner": "user",
"state": state,
}
return f"{self.auth_base_url}?{urlencode(params)}"
def exchange_code_for_tokens(
self, code: str, scopes: list[str]
) -> OAuth2Credentials:
request_body = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
}
auth_str = b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode()
headers = {
"Authorization": f"Basic {auth_str}",
"Accept": "application/json",
}
response = requests.post(self.token_url, json=request_body, headers=headers)
token_data = response.json()
# Email is only available for non-bot users
email = (
token_data["owner"]["person"]["email"]
if "person" in token_data["owner"]
and "email" in token_data["owner"]["person"]
else None
)
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=token_data.get("workspace_name"),
username=email,
access_token=token_data["access_token"],
refresh_token=None,
access_token_expires_at=None, # Notion tokens don't expire
refresh_token_expires_at=None,
scopes=[],
metadata={
"owner": token_data["owner"],
"bot_id": token_data["bot_id"],
"workspace_id": token_data["workspace_id"],
"workspace_name": token_data.get("workspace_name"),
"workspace_icon": token_data.get("workspace_icon"),
},
)
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# Notion doesn't support token revocation
return False
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# Notion doesn't support token refresh
return credentials
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
# Notion access tokens don't expire
return False
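# --- Usage sketch (editor's addition, not part of the original handler) ---
# A hypothetical wiring of the handler into an OAuth callback; the client id,
# secret, redirect URI, and authorization code are placeholders.
handler = NotionOAuthHandler(
    client_id="<client-id>",
    client_secret="<client-secret>",
    redirect_uri="https://example.com/oauth/callback",
)
login_url = handler.get_login_url(scopes=[], state="opaque-state-token")
# After the user authorizes and Notion redirects back with ?code=...:
# credentials = handler.exchange_code_for_tokens(code="<auth-code>", scopes=[])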
|
from base64 import b64encode
from urllib.parse import urlencode
from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials
from backend.util.request import requests
from .base import BaseOAuthHandler
class NotionOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at https://developers.notion.com/docs/authorization
Notes:
- Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
- Notion doesn't use scopes
"""
PROVIDER_NAME = "notion"
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
self.token_url = "https://api.notion.com/v1/oauth/token"
def get_login_url(self, scopes: list[str], state: str) -> str:
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_uri,
"response_type": "code",
"owner": "user",
"state": state,
}
return f"{self.auth_base_url}?{urlencode(params)}"
def exchange_code_for_tokens(
self, code: str, scopes: list[str]
) -> OAuth2Credentials:
request_body = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
}
auth_str = b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode()
headers = {
"Authorization": f"Basic {auth_str}",
"Accept": "application/json",
}
response = requests.post(self.token_url, json=request_body, headers=headers)
token_data = response.json()
# Email is only available for non-bot users
email = (
token_data["owner"]["person"]["email"]
if "person" in token_data["owner"]
and "email" in token_data["owner"]["person"]
else None
)
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=token_data.get("workspace_name"),
username=email,
access_token=token_data["access_token"],
refresh_token=None,
access_token_expires_at=None, # Notion tokens don't expire
refresh_token_expires_at=None,
scopes=[],
metadata={
"owner": token_data["owner"],
"bot_id": token_data["bot_id"],
"workspace_id": token_data["workspace_id"],
"workspace_name": token_data.get("workspace_name"),
"workspace_icon": token_data.get("workspace_icon"),
},
)
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# Notion doesn't support token revocation
return False
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# Notion doesn't support token refresh
return credentials
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
# Notion access tokens don't expire
return False
|
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
], ))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
], ))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import os
import pytest
from llama_index.llms.nvidia import NVIDIA
from typing import Any
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url="https://test_url/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key() -> None:
NVIDIA()
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
NVIDIA(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = NVIDIA(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(NVIDIA()) == "ENV"
assert get_api_key(NVIDIA(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration
def test_missing_api_key_error(masked_env_var: str) -> None:
with pytest.warns(UserWarning):
client = NVIDIA()
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = NVIDIA(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(chat_model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = NVIDIA(model=chat_model, **{**mode, **{param: masked_env_var}})
assert client.complete("Hello, world!").text
|
import os
import pytest
from llama_index.llms.nvidia import NVIDIA
from typing import Any
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url="https://test_url/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key() -> None:
NVIDIA()
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
NVIDIA(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = NVIDIA(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(NVIDIA()) == "ENV"
assert get_api_key(NVIDIA(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIA(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_missing_api_key_error(masked_env_var: str) -> None:
with pytest.warns(UserWarning):
client = NVIDIA()
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = NVIDIA(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.complete("Hello, world!").text
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(chat_model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = NVIDIA(model=chat_model, **{**mode, **{param: masked_env_var}})
assert client.complete("Hello, world!").text
|
"""Test Output parsers."""
import pytest
from llama_index.core.output_parsers.langchain import LangchainOutputParser
try:
import langchain # pants: no-infer-dep
from llama_index.core.bridge.langchain import (
BaseOutputParser as LCOutputParser,
)
from llama_index.core.bridge.langchain import (
ResponseSchema,
)
except ImportError:
langchain = None # type: ignore
@pytest.mark.skipif(langchain is None, reason="langchain not installed")
def test_lc_output_parser() -> None:
"""Test langchain output parser."""
class MockOutputParser(LCOutputParser):
"""
Mock output parser.
Similar to langchain's StructuredOutputParser, but better for testing.
"""
response_schema: ResponseSchema
def get_format_instructions(self) -> str:
"""Get format instructions."""
return (
f"{{ {self.response_schema.name}, {self.response_schema.description} }}"
)
def parse(self, text: str) -> str:
"""Parse the output of an LLM call."""
# TODO: make this better
return text
response_schema = ResponseSchema(
name="Education",
description="education experience",
)
lc_output_parser = MockOutputParser(response_schema=response_schema)
output_parser = LangchainOutputParser(lc_output_parser)
query_str = "Hello world."
output_instructions = output_parser.format(query_str)
assert output_instructions == (
"Hello world.\n\n{ Education, education experience }"
)
query_str = "foo {bar}."
output_instructions = output_parser.format(query_str)
assert output_instructions == (
"foo {bar}.\n\n{{ Education, education experience }}"
)
|
"""Test Output parsers."""
import pytest
from llama_index.core.output_parsers.langchain import LangchainOutputParser
try:
import langchain # pants: no-infer-dep
from llama_index.core.bridge.langchain import (
BaseOutputParser as LCOutputParser,
)
from llama_index.core.bridge.langchain import (
ResponseSchema,
)
except ImportError:
langchain = None # type: ignore
@pytest.mark.skipif(langchain is None, reason="langchain not installed")
def test_lc_output_parser() -> None:
"""Test langchain output parser."""
class MockOutputParser(LCOutputParser):
"""
Mock output parser.
Similar to langchain's StructuredOutputParser, but better for testing.
"""
response_schema: ResponseSchema
def get_format_instructions(self) -> str:
"""Get format instructions."""
return (
f"{{ {self.response_schema.name}, {self.response_schema.description} }}"
)
def parse(self, text: str) -> str:
"""Parse the output of an LLM call."""
# TODO: make this better
return text
response_schema = ResponseSchema(
name="Education",
description="education experience",
)
lc_output_parser = MockOutputParser(response_schema=response_schema)
output_parser = LangchainOutputParser(lc_output_parser)
query_str = "Hello world."
output_instructions = output_parser.format(query_str)
assert output_instructions == (
"Hello world.\n\n" "{ Education, education experience }"
)
query_str = "foo {bar}."
output_instructions = output_parser.format(query_str)
assert output_instructions == (
"foo {bar}.\n\n" "{{ Education, education experience }}"
)
|
"""This is the langchain_ollama package.
It provides infrastructure for interacting with the Ollama service.
"""
from importlib import metadata
from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain_ollama.llms import OllamaLLM
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
del metadata # optional, avoids polluting the results of dir(__package__)
__all__ = [
"ChatOllama",
"OllamaEmbeddings",
"OllamaLLM",
"__version__",
]
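# --- Usage sketch (editor's addition, not part of the original package) ---
# Minimal examples of the three exported entry points; the model name is an
# assumption and the calls require a locally running Ollama server.
chat = ChatOllama(model="llama3")
llm = OllamaLLM(model="llama3")
embeddings = OllamaEmbeddings(model="llama3")
# chat.invoke("Hello!")             # chat-style completion
# embeddings.embed_query("Hello!")  # embedding vector for a single query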
|
"""This is the langchain_ollama package.
It provides infrastructure for interacting with the Ollama service.
"""
from importlib import metadata
from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain_ollama.llms import OllamaLLM
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
del metadata # optional, avoids polluting the results of dir(__package__)
__all__ = [
"ChatOllama",
"OllamaLLM",
"OllamaEmbeddings",
"__version__",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .compose import Compose
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'PackDetInputs', 'Compose', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert'
]
|
from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: Union[BoundingBoxFormat, str], canvas_size: Tuple[int, int]) -> BoundingBoxes: # type: ignore[override]
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def wrap_like(
cls,
other: BoundingBoxes,
tensor: torch.Tensor,
*,
format: Optional[Union[BoundingBoxFormat, str]] = None,
canvas_size: Optional[Tuple[int, int]] = None,
) -> BoundingBoxes:
"""Wrap a :class:`torch.Tensor` as :class:`BoundingBoxes` from a reference.
Args:
other (BoundingBoxes): Reference bounding box.
tensor (Tensor): Tensor to be wrapped as :class:`BoundingBoxes`
format (BoundingBoxFormat, str, optional): Format of the bounding box. If omitted, it is taken from the
reference.
canvas_size (two-tuple of ints, optional): Height and width of the corresponding image or video. If
omitted, it is taken from the reference.
"""
return cls._wrap(
tensor,
format=format if format is not None else other.format,
canvas_size=canvas_size if canvas_size is not None else other.canvas_size,
)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
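# --- Usage sketch (editor's addition, not part of the original datapoint) ---
# Builds a single XYXY box for a hypothetical 480x640 image and re-wraps a
# plain tensor produced by some transform, inheriting format and canvas_size.
boxes = BoundingBoxes([[10.0, 20.0, 110.0, 220.0]], format="XYXY", canvas_size=(480, 640))
shifted = BoundingBoxes.wrap_like(boxes, boxes + 5)  # still XYXY on a 480x640 canvas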
|
from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: BoundingBoxFormat, canvas_size: Tuple[int, int]) -> BoundingBoxes: # type: ignore[override]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def wrap_like(
cls,
other: BoundingBoxes,
tensor: torch.Tensor,
*,
format: Optional[BoundingBoxFormat] = None,
canvas_size: Optional[Tuple[int, int]] = None,
) -> BoundingBoxes:
"""Wrap a :class:`torch.Tensor` as :class:`BoundingBoxes` from a reference.
Args:
other (BoundingBoxes): Reference bounding box.
tensor (Tensor): Tensor to be wrapped as :class:`BoundingBoxes`
format (BoundingBoxFormat, str, optional): Format of the bounding box. If omitted, it is taken from the
reference.
canvas_size (two-tuple of ints, optional): Height and width of the corresponding image or video. If
omitted, it is taken from the reference.
"""
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
return cls._wrap(
tensor,
format=format if format is not None else other.format,
canvas_size=canvas_size if canvas_size is not None else other.canvas_size,
)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
|
from typing import Any, Callable
from langchain_core.documents import Document
from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
@staticmethod
def _identity_fn(score: float) -> float:
return score
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return self._identity_fn
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> list[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> list[tuple[Document, float]]:
res = self.store.get(query)
if res is None:
return []
return [(res, 0.8)]
def test_multi_vector_retriever_initialization() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
retriever.vectorstore.add_documents(documents, ids=["1"])
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
async def test_multi_vector_retriever_initialization_async() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
await retriever.vectorstore.aadd_documents(documents, ids=["1"])
await retriever.docstore.amset(list(zip(["1"], documents)))
results = await retriever.ainvoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
def test_multi_vector_retriever_similarity_search_with_score() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
async def test_multi_vector_retriever_similarity_search_with_score_async() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
await vectorstore.aadd_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
|
from typing import Any, Callable, List, Tuple
from langchain_core.documents import Document
from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
@staticmethod
def _identity_fn(score: float) -> float:
return score
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return self._identity_fn
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
res = self.store.get(query)
if res is None:
return []
return [(res, 0.8)]
def test_multi_vector_retriever_initialization() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
retriever.vectorstore.add_documents(documents, ids=["1"])
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
async def test_multi_vector_retriever_initialization_async() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
await retriever.vectorstore.aadd_documents(documents, ids=["1"])
await retriever.docstore.amset(list(zip(["1"], documents)))
results = await retriever.ainvoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
def test_multi_vector_retriever_similarity_search_with_score() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
async def test_multi_vector_retriever_similarity_search_with_score_async() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
await vectorstore.aadd_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix the fork error on macOS; this alone seems to have no effect, so the EXPORT may still need to be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.13'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn on
    parallel-executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
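
# Usage note (a sketch of the behaviour above, not an official Jina recipe): setting
#   JINA_MP_START_METHOD=spawn
# in the environment before `import jina` makes the start-method block above call
# multiprocessing.set_start_method('spawn'); left unset on macOS with Python >= 3.8,
# the fallback branch forces 'fork' instead.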
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix the fork error on macOS; this alone seems to have no effect, so the EXPORT may still need to be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.12'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn on
    parallel-executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9070
Model Query Sparsity: Active Dimensions: 59.4, Sparsity Ratio: 0.9981
Model Corpus Sparsity: Active Dimensions: 61.9, Sparsity Ratio: 0.9980
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 86.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.60%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 86.00%
MRR@10: 0.6191
NDCG@10: 0.6780
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 45.4, Sparsity Ratio: 0.9985
Model Corpus Sparsity: Active Dimensions: 122.6, Sparsity Ratio: 0.9960
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 93.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 10.90%
Recall@10: 92.13%
MRR@10: 0.7815
NDCG@10: 0.8060
Model Query Sparsity: Active Dimensions: 52.4, Sparsity Ratio: 0.9983
Model Corpus Sparsity: Active Dimensions: 92.2, Sparsity Ratio: 0.9970
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8060
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Query Sparsity: Active Dimensions: 63.0, Sparsity Ratio: 0.9979
Model Corpus Sparsity: Active Dimensions: 63.4, Sparsity Ratio: 0.9979
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 48.1, Sparsity Ratio: 0.9984
Model Corpus Sparsity: Active Dimensions: 125.4, Sparsity Ratio: 0.9959
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Query Sparsity: Active Dimensions: 55.5, Sparsity Ratio: 0.9982
Model Corpus Sparsity: Active Dimensions: 94.4, Sparsity Ratio: 0.9969
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
"""
=============================
Recursive feature elimination
=============================
This example demonstrates how Recursive Feature Elimination
(:class:`~sklearn.feature_selection.RFE`) can be used to determine the
importance of individual pixels for classifying handwritten digits.
:class:`~sklearn.feature_selection.RFE` recursively removes the least
significant features, assigning ranks based on their importance, where higher
`ranking_` values denote lower importance. The ranking is visualized using both
shades of blue and pixel annotations for clarity. As expected, pixels positioned
at the center of the image tend to be more predictive than those near the edges.
.. note::
See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`
""" # noqa: E501
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Load the digits dataset
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
pipe = Pipeline(
[
("scaler", MinMaxScaler()),
("rfe", RFE(estimator=LogisticRegression(), n_features_to_select=1, step=1)),
]
)
pipe.fit(X, y)
ranking = pipe.named_steps["rfe"].ranking_.reshape(digits.images[0].shape)
# Plot pixel ranking
plt.matshow(ranking, cmap=plt.cm.Blues)
# Add annotations for pixel numbers
for i in range(ranking.shape[0]):
for j in range(ranking.shape[1]):
plt.text(j, i, str(ranking[i, j]), ha="center", va="center", color="black")
plt.colorbar()
plt.title("Ranking of pixels with RFE\n(Logistic Regression)")
plt.show()
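
# A quick sanity check on the result (a sketch assuming the fit above): with step=1 and
# n_features_to_select=1, RFE eliminates exactly one pixel per iteration, so `ranking_`
# spans 1..64 for the 8x8 digit images and rank 1 marks the single pixel kept last.
print(ranking.min(), ranking.max())  # 1 64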
|
"""
=============================
Recursive feature elimination
=============================
This example demonstrates how Recursive Feature Elimination
(:class:`~sklearn.feature_selection.RFE`) can be used to determine the
importance of individual pixels for classifying handwritten digits.
:class:`~sklearn.feature_selection.RFE` recursively removes the least
significant features, assigning ranks based on their importance, where higher
`ranking_` values denote lower importance. The ranking is visualized using both
shades of blue and pixel annotations for clarity. As expected, pixels positioned
at the center of the image tend to be more predictive than those near the edges.
.. note::
See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Load the digits dataset
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
pipe = Pipeline(
[
("scaler", MinMaxScaler()),
("rfe", RFE(estimator=LogisticRegression(), n_features_to_select=1, step=1)),
]
)
pipe.fit(X, y)
ranking = pipe.named_steps["rfe"].ranking_.reshape(digits.images[0].shape)
# Plot pixel ranking
plt.matshow(ranking, cmap=plt.cm.Blues)
# Add annotations for pixel numbers
for i in range(ranking.shape[0]):
for j in range(ranking.shape[1]):
plt.text(j, i, str(ranking[i, j]), ha="center", va="center", color="black")
plt.colorbar()
plt.title("Ranking of pixels with RFE\n(Logistic Regression)")
plt.show()
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weight_reduce_loss
def dice_loss(pred,
target,
weight=None,
eps=1e-3,
reduction='mean',
avg_factor=None):
"""Calculate dice loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *)
target (torch.Tensor): The learning label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
eps (float): Avoid dividing by zero. Default: 1e-3.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
input = pred.flatten(1)
target = target.flatten(1).float()
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + eps
c = torch.sum(target * target, 1) + eps
d = (2 * a) / (b + c)
loss = 1 - d
if weight is not None:
assert weight.ndim == loss.ndim
assert len(weight) == len(pred)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class DiceLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
activate=True,
reduction='mean',
loss_weight=1.0,
eps=1e-3):
"""`Dice Loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
            use_sigmoid (bool, optional): Whether the prediction is
                activated with sigmoid rather than softmax. Defaults to True.
            activate (bool): Whether to activate the predictions inside
                the loss; set to False to skip the internal sigmoid when
                the predictions are already activated. Defaults to True.
reduction (str, optional): The method used
to reduce the loss. Options are "none",
"mean" and "sum". Defaults to 'mean'.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
eps (float): Avoid dividing by zero. Defaults to 1e-3.
"""
super(DiceLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.reduction = reduction
self.loss_weight = loss_weight
self.eps = eps
self.activate = activate
def forward(self,
pred,
target,
weight=None,
reduction_override=None,
avg_factor=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *).
target (torch.Tensor): The label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.activate:
if self.use_sigmoid:
pred = pred.sigmoid()
else:
raise NotImplementedError
loss = self.loss_weight * dice_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor)
return loss
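
# A toy re-derivation of the dice term above (a sketch; only torch is needed and the
# numbers are made up): dice = 2 * sum(p * t) / (sum(p * p) + sum(t * t)), loss = 1 - dice.
if __name__ == '__main__':
    _pred = torch.tensor([[0.9, 0.1, 0.8, 0.2]])
    _target = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
    _eps = 1e-3
    _a = torch.sum(_pred * _target, 1)
    _b = torch.sum(_pred * _pred, 1) + _eps
    _c = torch.sum(_target * _target, 1) + _eps
    print(1 - 2 * _a / (_b + _c))  # ~tensor([0.0291]): heavy overlap, so the loss is near 0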
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weight_reduce_loss
def dice_loss(pred,
target,
weight=None,
eps=1e-3,
reduction='mean',
avg_factor=None):
"""Calculate dice loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *)
target (torch.Tensor): The learning label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
eps (float): Avoid dividing by zero. Default: 1e-3.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
input = pred.reshape(pred.size()[0], -1)
target = target.reshape(target.size()[0], -1).float()
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + eps
c = torch.sum(target * target, 1) + eps
d = (2 * a) / (b + c)
loss = 1 - d
if weight is not None:
assert weight.ndim == loss.ndim
assert len(weight) == len(pred)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class DiceLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
activate=True,
reduction='mean',
loss_weight=1.0,
eps=1e-3):
"""`Dice Loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
            use_sigmoid (bool, optional): Whether the prediction is
                activated with sigmoid rather than softmax. Defaults to True.
            activate (bool): Whether to activate the predictions inside
                the loss; set to False to skip the internal sigmoid when
                the predictions are already activated. Defaults to True.
reduction (str, optional): The method used
to reduce the loss. Options are "none",
"mean" and "sum". Defaults to 'mean'.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
eps (float): Avoid dividing by zero. Defaults to 1e-3.
"""
super(DiceLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.reduction = reduction
self.loss_weight = loss_weight
self.eps = eps
self.activate = activate
def forward(self,
pred,
target,
weight=None,
reduction_override=None,
avg_factor=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *).
target (torch.Tensor): The label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.activate:
if self.use_sigmoid:
pred = pred.sigmoid()
else:
raise NotImplementedError
loss = self.loss_weight * dice_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor)
return loss
|
"""Helpers for creating Anthropic API clients.
This module allows for the caching of httpx clients to avoid creating new instances
for each instance of ChatAnthropic.
Logic is largely replicated from anthropic._base_client.
"""
from __future__ import annotations
import asyncio
import os
from functools import lru_cache
from typing import Any, Optional
import anthropic
_NOT_GIVEN: Any = object()
class _SyncHttpxClientWrapper(anthropic.DefaultHttpxClient):
"""Borrowed from anthropic._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
self.close()
except Exception: # noqa: S110
pass
class _AsyncHttpxClientWrapper(anthropic.DefaultAsyncHttpxClient):
"""Borrowed from anthropic._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
# TODO(someday): support non asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception: # noqa: S110
pass
@lru_cache
def _get_default_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _SyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _SyncHttpxClientWrapper(**kwargs)
@lru_cache
def _get_default_async_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _AsyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _AsyncHttpxClientWrapper(**kwargs)
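
# The caching pattern above in miniature (a sketch with a hypothetical stand-in class,
# not the anthropic SDK): identical arguments return the very same object, so every
# ChatAnthropic instance with the same settings ends up sharing one connection pool.
if __name__ == "__main__":

    class _FakePool:
        def __init__(self, base_url: str) -> None:
            self.base_url = base_url

    @lru_cache
    def _get_fake_pool(base_url: str) -> _FakePool:
        return _FakePool(base_url)

    assert _get_fake_pool("https://example.invalid") is _get_fake_pool("https://example.invalid")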
|
"""Helpers for creating Anthropic API clients.
This module allows for the caching of httpx clients to avoid creating new instances
for each instance of ChatAnthropic.
Logic is largely replicated from anthropic._base_client.
"""
import asyncio
import os
from functools import lru_cache
from typing import Any, Optional
import anthropic
_NOT_GIVEN: Any = object()
class _SyncHttpxClientWrapper(anthropic.DefaultHttpxClient):
"""Borrowed from anthropic._base_client"""
def __del__(self) -> None:
if self.is_closed:
return
try:
self.close()
except Exception: # noqa: S110
pass
class _AsyncHttpxClientWrapper(anthropic.DefaultAsyncHttpxClient):
"""Borrowed from anthropic._base_client"""
def __del__(self) -> None:
if self.is_closed:
return
try:
# TODO(someday): support non asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception: # noqa: S110
pass
@lru_cache
def _get_default_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _SyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _SyncHttpxClientWrapper(**kwargs)
@lru_cache
def _get_default_async_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _AsyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _AsyncHttpxClientWrapper(**kwargs)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg16 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg16 import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16
from keras.src.applications.vgg16 import decode_predictions
from keras.src.applications.vgg16 import preprocess_input
|
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(ctrl_address=f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol)
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout / 1000)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
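
# A rough CLI sketch of how this checker tends to be invoked (the exact flag names are
# an assumption here; `jina ping --help` is authoritative):
#   jina ping flow grpc://localhost:12345 --attempts 5 --timeout 3000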
|
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(ctrl_address=f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol)
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout / 1000)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x8a\x05\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x12\n\x08mesh_url\x18\x0c \x01(\tH\x00\x12\x19\n\x0fpoint_cloud_url\x18\r \x01(\tH\x00\x12\x13\n\taudio_url\x18\x0e \x01(\tH\x00\x12/\n\raudio_ndarray\x18\x0f \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x34\n\x12\x61udio_torch_tensor\x18\x10 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x13\n\tvideo_url\x18\x11 \x01(\tH\x00\x12/\n\rvideo_ndarray\x18\x12 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x34\n\x12video_torch_tensor\x18\x13 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 881
_DOCUMENTPROTO._serialized_start = 884
_DOCUMENTPROTO._serialized_end = 1014
_DOCUMENTPROTO_DATAENTRY._serialized_start = 950
_DOCUMENTPROTO_DATAENTRY._serialized_end = 1014
_DOCUMENTARRAYPROTO._serialized_start = 1016
_DOCUMENTARRAYPROTO._serialized_end = 1075
_UNIONARRAYPROTO._serialized_start = 1078
_UNIONARRAYPROTO._serialized_end = 1212
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 1215
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1429
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1356
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1429
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x8e\x04\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x12\n\x08mesh_url\x18\x0c \x01(\tH\x00\x12\x19\n\x0fpoint_cloud_url\x18\r \x01(\tH\x00\x12\x13\n\taudio_url\x18\x0e \x01(\tH\x00\x12/\n\raudio_ndarray\x18\x0f \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x34\n\x12\x61udio_torch_tensor\x18\x10 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 757
_DOCUMENTPROTO._serialized_start = 760
_DOCUMENTPROTO._serialized_end = 890
_DOCUMENTPROTO_DATAENTRY._serialized_start = 826
_DOCUMENTPROTO_DATAENTRY._serialized_end = 890
_DOCUMENTARRAYPROTO._serialized_start = 892
_DOCUMENTARRAYPROTO._serialized_end = 951
_UNIONARRAYPROTO._serialized_start = 954
_UNIONARRAYPROTO._serialized_end = 1088
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 1091
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1305
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1232
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1305
# @@protoc_insertion_point(module_scope)
|
"""Test in memory docstore."""
from typing import Any
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer",
description="answer to the user's question",
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
],
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert result_dict == DEF_EXPECTED_RESULT
def test_combining_output_parser_output_type() -> None:
"""Test combining output parser output type is Dict[str, Any]."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer",
description="answer to the user's question",
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
],
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
assert combining_parser.OutputType == dict[str, Any]
|
"""Test in memory docstore."""
from typing import Any
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert result_dict == DEF_EXPECTED_RESULT
def test_combining_output_parser_output_type() -> None:
"""Test combining output parser output type is Dict[str, Any]."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
assert combining_parser.OutputType == dict[str, Any]
|
_base_ = './mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
_base_ = './mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
# dataset settings
dataset_type = 'RefCocoDataset'
data_root = 'data/coco/'
backend_args = None
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_mask=True,
with_bbox=False,
with_seg=False,
with_label=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'gt_masks', 'text'))
]
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='train2014/'),
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='val',
text_mode='select_first',
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='train2014/'),
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='testA', # or 'testB'
text_mode='select_first',
pipeline=test_pipeline))
val_evaluator = dict(type='RefSegMetric', metric=['cIoU', 'mIoU'])
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'RefCOCODataset'
data_root = 'data/refcoco/'
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='train',
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='val',
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='testA', # or 'testB'
pipeline=test_pipeline,
backend_args=backend_args))
# TODO: set the metrics
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
]
|
from ._conformer_wav2vec2 import conformer_wav2vec2_base, conformer_wav2vec2_model
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"emformer_hubert_base",
"emformer_hubert_model",
]
|
import os
import shutil
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
_MIXTURE_FILES = {
"speech_separation": "mixture_3729-6852-0037_8463-287645-0000.wav",
"music_separation": "al_james_mixture_shorter.wav",
}
_CLEAN_FILES = {
"speech_separation": [
"s1_3729-6852-0037_8463-287645-0000.wav",
"s2_3729-6852-0037_8463-287645-0000.wav",
],
"music_separation": [
"al_james_drums_shorter.wav",
"al_james_bass_shorter.wav",
"al_james_other_shorter.wav",
"al_james_vocals_shorter.wav",
],
}
@pytest.fixture
def sample_speech(lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = torchaudio.utils.download_asset(f"test-assets/{filename}")
return path
@pytest.fixture
def mixture_source(task):
if task not in _MIXTURE_FILES:
raise NotImplementedError(f"Unexpected task: {task}")
path = torchaudio.utils.download_asset(f"test-assets/{_MIXTURE_FILES[task]}")
return path
@pytest.fixture
def clean_sources(task):
if task not in _CLEAN_FILES:
raise NotImplementedError(f"Unexpected task: {task}")
paths = []
for file in _CLEAN_FILES[task]:
path = torchaudio.utils.download_asset(f"test-assets/{file}")
paths.append(path)
return paths
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmp_path, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
subdir = os.path.join(tmp_path, "hub")
torch.hub.set_dir(subdir)
yield
torch.hub.set_dir(org_dir)
shutil.rmtree(subdir, ignore_errors=True)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
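
# A toy run of GreedyCTCDecoder above (a sketch; the labels and logits are made up):
# argmax per frame gives [1, 1, 0, 2], unique_consecutive collapses the repeated "a",
# the blank at index 0 is dropped, and the remaining labels spell "ab".
if __name__ == "__main__":
    _logits = torch.tensor(
        [
            [0.1, 0.8, 0.1],  # -> "a"
            [0.1, 0.7, 0.2],  # -> "a" (repeat, collapsed)
            [0.9, 0.05, 0.05],  # -> blank (dropped)
            [0.1, 0.1, 0.8],  # -> "b"
        ]
    )
    _decoder = GreedyCTCDecoder(labels=["-", "a", "b"], blank=0)
    assert _decoder(_logits) == "ab"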
|
import os
import shutil
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
_MIXTURE_FILE = "mixture_3729-6852-0037_8463-287645-0000.wav"
_CLEAN_FILES = [
"s1_3729-6852-0037_8463-287645-0000.wav",
"s2_3729-6852-0037_8463-287645-0000.wav",
]
@pytest.fixture
def sample_speech(lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = torchaudio.utils.download_asset(f"test-assets/{filename}")
return path
@pytest.fixture
def mixture_source():
path = torchaudio.utils.download_asset(f"test-assets/{_MIXTURE_FILE}")
return path
@pytest.fixture
def clean_sources():
paths = []
for file in _CLEAN_FILES:
path = torchaudio.utils.download_asset(f"test-assets/{file}")
paths.append(path)
return paths
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmp_path, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
subdir = os.path.join(tmp_path, "hub")
torch.hub.set_dir(subdir)
yield
torch.hub.set_dir(org_dir)
shutil.rmtree(subdir, ignore_errors=True)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
                cls_score (Tensor): Cls scores for a single scale level,
                    with num_anchors * num_classes channels.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, with num_anchors * 4 channels.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
level, the channels number is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.1',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.1',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.0.1',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.0.1',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.1',
'strawberry-graphql',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.0',
],
'annlite': [
'annlite>=0.3.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet'
]
|
from __future__ import annotations
from collections import Counter
import pytest
from sentence_transformers.sampler import GroupByLabelBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ..., 99],
"label_a": [0, 1, 0, 1, ..., 0, 1],
"label_b": [0, 1, 2, 3, 4, 0, ..., 4]
}
"""
data = {"data": list(range(100)), "label_a": [i % 2 for i in range(100)], "label_b": [i % 5 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_uneven_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": ["a"] * 51,
"label": [0] * 17 + [1] * 17 + [2] * 17,
}
"""
data = {"data": ["a"] * 51, "label": [0] * 17 + [1] * 17 + [2] * 17}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label_a", "label_b"]
)
batches = list(iter(sampler))
assert all(len(batch) == batch_size for batch in batches)
# Check if all labels within each batch are identical
# In this case, label_a has 50 0's and 50 1's, so with a batch size of 10 we expect each batch to
# have only 0's or only 1's.
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_a"] for idx in batch]
assert len(set(labels)) == 1, f"Batch {batch} does not have identical labels: {labels}"
def test_group_by_label_batch_sampler_label_b(dummy_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label_b"]
)
# drop_last=True, so each batch should be the same length and the last batch is dropped.
batches = list(iter(sampler))
assert all(
len(batch) == batch_size for batch in batches
), "Not all batches are the same size, while drop_last was True."
# Assert that we have the expected number of total samples in the batches.
assert sum(len(batch) for batch in batches) == 100 // batch_size * batch_size
# Since we have 20 occurrences each of label_b values 0, 1, 2, 3 and 4 and a batch_size of 8, we expect each batch
    # to have either 4 or 8 samples with the same label. (The first two batches take 16 samples of the first label,
    # leaving 4 for the third batch. Then 4 of the next label are added, leaving 16 for the next two batches, and so on.)
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_b"] for idx in batch]
counts = list(Counter(labels).values())
assert counts == [8] or counts == [4, 4]
def test_group_by_label_batch_sampler_uneven_dataset(dummy_uneven_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_uneven_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label"]
)
    # With a batch_size of 8 and 17 samples per label, verify that every label in a batch occurs at least twice.
# We accept some tiny data loss (1 sample per label) due to the uneven number of samples per label.
batches = list(iter(sampler))
for batch in batches:
labels = [dummy_uneven_dataset[int(idx)]["label"] for idx in batch]
counts = list(Counter(labels).values())
        assert all(count > 1 for count in counts)
|
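A hedged sketch of how a batch sampler like the one tested above is typically consumed: passed to a torch DataLoader through batch_sampler, so every yielded batch shares one label value. It assumes the datasets, torch and sentence-transformers packages used by the tests are installed; the column names mirror the fixtures above.
# Hedged usage sketch (not part of the test file above).
from datasets import Dataset
from torch.utils.data import DataLoader
from sentence_transformers.sampler import GroupByLabelBatchSampler

data = Dataset.from_dict({"data": list(range(20)), "label": [i % 2 for i in range(20)]})
sampler = GroupByLabelBatchSampler(
    dataset=data, batch_size=4, drop_last=False, valid_label_columns=["label"]
)
loader = DataLoader(data, batch_sampler=sampler)
for batch in loader:
    print(batch["label"])  # each batch is expected to hold a single label value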
from __future__ import annotations
from collections import Counter
import pytest
from datasets import Dataset
from sentence_transformers.sampler import GroupByLabelBatchSampler
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ..., 99],
"label_a": [0, 1, 0, 1, ..., 0, 1],
"label_b": [0, 1, 2, 3, 4, 0, ..., 4]
}
"""
data = {"data": list(range(100)), "label_a": [i % 2 for i in range(100)], "label_b": [i % 5 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_uneven_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": ["a"] * 51,
"label": [0] * 17 + [1] * 17 + [2] * 17,
}
"""
data = {"data": ["a"] * 51, "label": [0] * 17 + [1] * 17 + [2] * 17}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label_a", "label_b"]
)
batches = list(iter(sampler))
assert all(len(batch) == batch_size for batch in batches)
# Check if all labels within each batch are identical
# In this case, label_a has 50 0's and 50 1's, so with a batch size of 10 we expect each batch to
# have only 0's or only 1's.
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_a"] for idx in batch]
assert len(set(labels)) == 1, f"Batch {batch} does not have identical labels: {labels}"
def test_group_by_label_batch_sampler_label_b(dummy_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label_b"]
)
# drop_last=True, so each batch should be the same length and the last batch is dropped.
batches = list(iter(sampler))
assert all(
len(batch) == batch_size for batch in batches
), "Not all batches are the same size, while drop_last was True."
# Assert that we have the expected number of total samples in the batches.
assert sum(len(batch) for batch in batches) == 100 // batch_size * batch_size
# Since we have 20 occurrences each of label_b values 0, 1, 2, 3 and 4 and a batch_size of 8, we expect each batch
    # to have either 4 or 8 samples with the same label. (The first two batches take 16 samples of the first label,
    # leaving 4 for the third batch. Then 4 of the next label are added, leaving 16 for the next two batches, and so on.)
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_b"] for idx in batch]
counts = list(Counter(labels).values())
assert counts == [8] or counts == [4, 4]
def test_group_by_label_batch_sampler_uneven_dataset(dummy_uneven_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_uneven_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label"]
)
    # With a batch_size of 8 and 17 samples per label, verify that every label in a batch occurs at least twice.
# We accept some tiny data loss (1 sample per label) due to the uneven number of samples per label.
batches = list(iter(sampler))
for batch in batches:
labels = [dummy_uneven_dataset[int(idx)]["label"] for idx in batch]
counts = list(Counter(labels).values())
        assert all(count > 1 for count in counts)
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm']),
dict(
type='CityScapesMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
seg_prefix=data_root + '/gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance')
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
# dataset settings
dataset_type = 'CityscapesDataset'
# TODO remove it after cityscape metric
# data_root = '/mnt/lustre/luochunhua.vendor/openmmlab2.0/data/cityscapes/'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm'])
test_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm'])
# TODO add setting on test dataset after cityscape metric
# inference on test dataset and
# format the output results for submission.
# test_dataloader = None
# test_evaluator = None
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from ...match_merger import MatchMerger
@pytest.fixture
def docs_matrix():
return [
DocumentArray(
[
Document(
id=f'doc {i}',
matches=[Document(id=f'doc {i}, match {j}') for j in range(3)],
chunks=[
Document(
id=f'doc {i}, chunk {j}',
matches=[
Document(id=f'doc {i}, chunk {j}, match {k}')
for k in range(2)
],
)
for j in range(3)
],
)
for i in range(2)
]
)
for shard in range(4)
]
def test_root_traversal(docs_matrix):
executor = MatchMerger()
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 2
for d in document_array:
assert len(d.matches) == 12
def test_chunk_traversal(docs_matrix):
executor = MatchMerger(default_traversal_paths=('c',))
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 6
for d in document_array:
assert len(d.matches) == 8
|
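A quick, hedged arithmetic check of the counts asserted in the tests above: with 4 shards, every root document carries 3 matches per shard and every chunk carries 2 matches per shard.
# Arithmetic behind the assertions in the MatchMerger tests above.
shards, root_matches_per_shard, chunk_matches_per_shard = 4, 3, 2
assert shards * root_matches_per_shard == 12   # matches per merged root document
assert shards * chunk_matches_per_shard == 8   # matches per merged chunk
docs, chunks_per_doc = 2, 3
assert docs * chunks_per_doc == 6              # documents returned for the 'c' traversal path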
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from ...match_merger import MatchMerger
@pytest.fixture
def docs_matrix():
return [
DocumentArray(
[
Document(
id=f'doc {i}',
matches=[
Document(id=f'doc {i}, match {j}')
for j in range(3)
],
chunks=[
Document(
id=f'doc {i}, chunk {j}',
matches=[
Document(
id=f'doc {i}, chunk {j}, match {k}'
)
for k in range(2)
]
)
for j in range(3)
])
for i in range(2)
]
)
for shard in range(4)
]
def test_root_traversal(docs_matrix):
executor = MatchMerger()
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 2
for d in document_array:
assert len(d.matches) == 12
def test_chunk_traversal(docs_matrix):
executor = MatchMerger(default_traversal_paths=('c',))
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 6
for d in document_array:
assert len(d.matches) == 8
|
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.document.pydantic_model import PydanticDocumentArray
from docarray.typing import T
from pydantic import BaseModel
class PydanticMixin:
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a JSON Schema of DocumentArray class."""
from pydantic import schema_json_of
from docarray.document.pydantic_model import PydanticDocumentArray
return schema_json_of(
PydanticDocumentArray, title='DocumentArray Schema', indent=indent
)
def to_pydantic_model(self) -> 'PydanticDocumentArray':
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_pydantic_model() for d in self]
@classmethod
def from_pydantic_model(cls: Type['T'], model: List['BaseModel']) -> 'T':
"""Convert a list of PydanticDocument into DocumentArray
:param model: the list of pydantic data model objects that represents a DocumentArray
:return: a DocumentArray
"""
from docarray import Document
return cls(Document.from_pydantic_model(m) for m in model)
|
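A hedged usage sketch of the mixin above, assuming the v1-style docarray package (whose DocumentArray includes this mixin) and pydantic are installed.
# Hedged sketch: round-trip a DocumentArray through its pydantic representation.
from docarray import Document, DocumentArray

da = DocumentArray([Document(text='hello'), Document(text='world')])
models = da.to_pydantic_model()                  # list of PydanticDocument objects
restored = DocumentArray.from_pydantic_model(models)
assert len(restored) == len(da)
print(DocumentArray.get_json_schema(indent=2)[:60])  # JSON Schema as a string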
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from ...document.pydantic_model import PydanticDocumentArray
from ...typing import T
from pydantic import BaseModel
class PydanticMixin:
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a JSON Schema of DocumentArray class."""
from pydantic import schema_json_of
from ...document.pydantic_model import PydanticDocumentArray
return schema_json_of(
PydanticDocumentArray, title='DocumentArray Schema', indent=indent
)
def to_pydantic_model(self) -> 'PydanticDocumentArray':
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_pydantic_model() for d in self]
@classmethod
def from_pydantic_model(cls: Type['T'], model: List['BaseModel']) -> 'T':
"""Convert a list of PydanticDocument into DocumentArray
:param model: the list of pydantic data model objects that represents a DocumentArray
:return: a DocumentArray
"""
from ... import Document
return cls(Document.from_pydantic_model(m) for m in model)
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_new_project_parser(parser=None):
"""Set the parser for `new`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'name', type=str, help='The name of the project', default='hello-jina'
)
parser.add_argument(
'--type', type=str, help='The type of project to be created (either flow or deployment)', default='flow'
)
return parser
|
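A hedged sketch of exercising the parser returned by the function above (the --type flag exists only in this first variant); it assumes jina is importable so that set_base_parser() resolves, and it calls the function defined above directly.
# Hedged sketch: parse arguments for the `new` command defined above.
parser = set_new_project_parser()
args = parser.parse_args(['hello-world', '--type', 'deployment'])
print(args.name, args.type)  # -> hello-world deployment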
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_new_project_parser(parser=None):
"""Set the parser for `new`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'name', type=str, help='The name of the project', default='hello-jina'
)
return parser
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating the EMA parameters.
            The EMA parameters are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
        gamma (int): Use a larger momentum early in training and gradually
            anneal to a smaller value to update the EMA model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-(1 + steps) / self.gamma) + self.momentum
averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)
|
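A small numeric check of the momentum schedule documented above, (1 - momentum) * exp(-(1 + steps) / gamma) + momentum: early in training the effective momentum is close to 1, so the EMA follows the source model almost directly, and it decays towards the configured floor as steps grow. The values below use the class defaults.
# Hedged numeric sketch of the exponential momentum schedule above.
import math

m, gamma = 0.0002, 2000  # defaults from ExpMomentumEMA
for step in (0, 2000, 20000):
    momentum = (1 - m) * math.exp(-(1 + step) / gamma) + m
    print(step, round(momentum, 6))
# step 0     -> ~0.9995 (EMA weights track the source model closely)
# step 2000  -> ~0.3678
# step 20000 -> ~0.000245 (close to the 0.0002 floor)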
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating the EMA parameters.
            The EMA parameters are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
        gamma (int): Use a larger momentum early in training and gradually
            anneal to a smaller value to update the EMA model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> Tensor:
"""Compute the moving average of the parameters using the linear
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
Returns:
Tensor: The averaged parameters.
"""
momentum = (1 - self.momentum) * math.exp(
-(1 + steps) / self.gamma) + self.momentum
return averaged_param * (1 - momentum) + source_param * momentum
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmengine
from mmengine import Config, DictAction
from mmengine.evaluator import Evaluator
from mmengine.registry import init_default_scope
from mmdet.registry import DATASETS
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
init_default_scope(cfg.get('default_scope', 'mmdet'))
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
dataset = DATASETS.build(cfg.test_dataloader.dataset)
predictions = mmengine.load(args.pkl_results)
evaluator = Evaluator(cfg.val_evaluator)
evaluator.dataset_meta = dataset.metainfo
eval_results = evaluator.offline_evaluate(predictions)
print(eval_results)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmengine
from mmengine import Config, DictAction
from mmengine.evaluator import Evaluator
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
register_all_modules(init_default_scope=True)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
dataset = DATASETS.build(cfg.test_dataloader.dataset)
predictions = mmengine.load(args.pkl_results)
evaluator = Evaluator(cfg.val_evaluator)
evaluator.dataset_meta = dataset.metainfo
eval_results = evaluator.offline_evaluate(predictions)
print(eval_results)
if __name__ == '__main__':
main()
|
import numpy as np
import pytest
import tempfile
import os
from PIL import Image
from unittest.mock import patch, MagicMock
from llama_index.core.schema import ImageDocument
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.huggingface import HuggingFaceMultiModal
@pytest.fixture(scope="module")
def mock_model():
with (
patch(
"llama_index.multi_modal_llms.huggingface.base.AutoConfig"
) as mock_config,
patch(
"llama_index.multi_modal_llms.huggingface.base.Qwen2VLForConditionalGeneration"
) as mock_model_class,
patch(
"llama_index.multi_modal_llms.huggingface.base.AutoProcessor"
) as mock_processor,
):
mock_config.from_pretrained.return_value = MagicMock(
architectures=["Qwen2VLForConditionalGeneration"]
)
mock_model = mock_model_class.from_pretrained.return_value
mock_processor = mock_processor.from_pretrained.return_value
yield HuggingFaceMultiModal.from_model_name("Qwen/Qwen2-VL-2B-Instruct")
# Replace the existing 'model' fixture with this mock_model
@pytest.fixture(scope="module")
def model(mock_model):
return mock_model
@pytest.fixture(scope="module")
def temp_image_path():
# Create a white square image
white_square = np.ones((100, 100, 3), dtype=np.uint8) * 255
image = Image.fromarray(white_square)
# Create a temporary file
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
image.save(temp_file, format="PNG")
temp_path = temp_file.name
yield temp_path
# Clean up the temporary file after the test
os.unlink(temp_path)
def test_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
def test_initialization(model):
assert isinstance(model, HuggingFaceMultiModal)
assert model.model_name == "Qwen/Qwen2-VL-2B-Instruct"
def test_metadata(model):
metadata = model.metadata
assert metadata.model_name == "Qwen/Qwen2-VL-2B-Instruct"
assert metadata.context_window == 3900 # Default value
assert metadata.num_output == 256 # Default value
def test_complete(model, temp_image_path):
prompt = "Describe this image:"
image_doc = ImageDocument(image_path=temp_image_path)
# Mock the _prepare_messages and _generate methods
model._prepare_messages = MagicMock(return_value={"mocked": "inputs"})
model._generate = MagicMock(return_value="This is a mocked response.")
response = model.complete(prompt, image_documents=[image_doc])
assert response.text == "This is a mocked response."
model._prepare_messages.assert_called_once()
model._generate.assert_called_once_with({"mocked": "inputs"})
def test_chat(model, temp_image_path):
messages = [ChatMessage(role="user", content="What's in this image?")]
image_doc = ImageDocument(image_path=temp_image_path)
# Mock the _prepare_messages and _generate methods
model._prepare_messages = MagicMock(return_value={"mocked": "inputs"})
model._generate = MagicMock(return_value="This is a mocked chat response.")
response = model.chat(messages, image_documents=[image_doc])
assert response.message.content == "This is a mocked chat response."
model._prepare_messages.assert_called_once()
model._generate.assert_called_once_with({"mocked": "inputs"})
@pytest.mark.asyncio
@pytest.mark.parametrize(
"method_name",
[
"astream_chat",
"astream_complete",
"acomplete",
"achat",
],
)
async def test_unsupported_methods(model, method_name):
with pytest.raises(NotImplementedError):
method = getattr(model, method_name)
if method_name in ["astream_chat", "achat"]:
await method([])
else:
await method("prompt", [])
|
import numpy as np
import pytest
import tempfile
import os
from PIL import Image
from unittest.mock import patch, MagicMock
from llama_index.core.schema import ImageDocument
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.huggingface import HuggingFaceMultiModal
@pytest.fixture(scope="module")
def mock_model():
with patch(
"llama_index.multi_modal_llms.huggingface.base.AutoConfig"
) as mock_config, patch(
"llama_index.multi_modal_llms.huggingface.base.Qwen2VLForConditionalGeneration"
) as mock_model_class, patch(
"llama_index.multi_modal_llms.huggingface.base.AutoProcessor"
) as mock_processor:
mock_config.from_pretrained.return_value = MagicMock(
architectures=["Qwen2VLForConditionalGeneration"]
)
mock_model = mock_model_class.from_pretrained.return_value
mock_processor = mock_processor.from_pretrained.return_value
yield HuggingFaceMultiModal.from_model_name("Qwen/Qwen2-VL-2B-Instruct")
# Replace the existing 'model' fixture with this mock_model
@pytest.fixture(scope="module")
def model(mock_model):
return mock_model
@pytest.fixture(scope="module")
def temp_image_path():
# Create a white square image
white_square = np.ones((100, 100, 3), dtype=np.uint8) * 255
image = Image.fromarray(white_square)
# Create a temporary file
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
image.save(temp_file, format="PNG")
temp_path = temp_file.name
yield temp_path
# Clean up the temporary file after the test
os.unlink(temp_path)
def test_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
def test_initialization(model):
assert isinstance(model, HuggingFaceMultiModal)
assert model.model_name == "Qwen/Qwen2-VL-2B-Instruct"
def test_metadata(model):
metadata = model.metadata
assert metadata.model_name == "Qwen/Qwen2-VL-2B-Instruct"
assert metadata.context_window == 3900 # Default value
assert metadata.num_output == 256 # Default value
def test_complete(model, temp_image_path):
prompt = "Describe this image:"
image_doc = ImageDocument(image_path=temp_image_path)
# Mock the _prepare_messages and _generate methods
model._prepare_messages = MagicMock(return_value={"mocked": "inputs"})
model._generate = MagicMock(return_value="This is a mocked response.")
response = model.complete(prompt, image_documents=[image_doc])
assert response.text == "This is a mocked response."
model._prepare_messages.assert_called_once()
model._generate.assert_called_once_with({"mocked": "inputs"})
def test_chat(model, temp_image_path):
messages = [ChatMessage(role="user", content="What's in this image?")]
image_doc = ImageDocument(image_path=temp_image_path)
# Mock the _prepare_messages and _generate methods
model._prepare_messages = MagicMock(return_value={"mocked": "inputs"})
model._generate = MagicMock(return_value="This is a mocked chat response.")
response = model.chat(messages, image_documents=[image_doc])
assert response.message.content == "This is a mocked chat response."
model._prepare_messages.assert_called_once()
model._generate.assert_called_once_with({"mocked": "inputs"})
@pytest.mark.asyncio
@pytest.mark.parametrize(
"method_name",
[
"astream_chat",
"astream_complete",
"acomplete",
"achat",
],
)
async def test_unsupported_methods(model, method_name):
with pytest.raises(NotImplementedError):
method = getattr(model, method_name)
if method_name in ["astream_chat", "achat"]:
await method([])
else:
await method("prompt", [])
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
class BinaryCrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, pos_weight: Tensor | None = None, **kwargs) -> None:
super().__init__()
self.model = model
self.bce_with_logits_loss = nn.BCEWithLogitsLoss(pos_weight=pos_weight, **kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"BinaryCrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
loss = self.bce_with_logits_loss(logits, labels.float())
return loss
|
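A hedged usage sketch of the loss above with a single-logit CrossEncoder. The model name is illustrative, the call downloads weights, and it assumes a recent sentence-transformers where CrossEncoder exposes the tokenizer, device and num_labels attributes used by the loss.
# Hedged sketch: score two (query, passage) pairs with the loss defined above.
import torch
from sentence_transformers.cross_encoder import CrossEncoder

model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")  # illustrative single-logit model
loss_fn = BinaryCrossEntropyLoss(model)
# Two columns: first texts and second texts, i.e. two (query, passage) pairs.
inputs = [
    ["how many people live in berlin", "how many people live in berlin"],
    ["Berlin has about 3.7 million inhabitants.", "Paris is the capital of France."],
]
labels = torch.tensor([1.0, 0.0])
print(loss_fn(inputs, labels).item())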
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
# TODO: Bad name, don't 1-1 copy the name from PyTorch
class BinaryCrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, pos_weight: Tensor | None = None, **kwargs) -> None:
super().__init__()
self.model = model
self.bce_with_logits_loss = nn.BCEWithLogitsLoss(pos_weight=pos_weight, **kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"BinaryCrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
loss = self.bce_with_logits_loss(logits, labels.float())
return loss
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .auto_factory import *
from .configuration_auto import *
from .feature_extraction_auto import *
from .image_processing_auto import *
from .modeling_auto import *
from .modeling_flax_auto import *
from .modeling_tf_auto import *
from .processing_auto import *
from .tokenization_auto import *
from .video_processing_auto import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .auto_factory import *
from .configuration_auto import *
from .feature_extraction_auto import *
from .image_processing_auto import *
from .modeling_auto import *
from .modeling_flax_auto import *
from .modeling_tf_auto import *
from .processing_auto import *
from .tokenization_auto import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import pytest
from mmengine.device import is_cuda_available
from mmengine.testing import RunnerTestCase
class TestEmptyCacheHook(RunnerTestCase):
@pytest.mark.skipif(
not is_cuda_available(), reason='cuda should be available')
def test_with_runner(self):
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg = self.epoch_based_cfg
cfg.custom_hooks = [dict(type='EmptyCacheHook')]
cfg.train_cfg.val_interval = 1e6 # disable validation during training # noqa: E501
runner = self.build_runner(cfg)
runner.train()
runner.test()
runner.val()
# Call `torch.cuda.empty_cache` after each epoch:
# runner.train: `max_epochs` times.
# runner.val: `1` time.
# runner.test: `1` time.
target_called_times = runner.max_epochs + 2
self.assertEqual(mock_empty_cache.call_count, target_called_times)
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg.custom_hooks = [dict(type='EmptyCacheHook', before_epoch=True)]
runner = self.build_runner(cfg)
runner.train()
runner.val()
runner.test()
# Call `torch.cuda.empty_cache` after/before each epoch:
# runner.train: `max_epochs*2` times.
# runner.val: `1*2` times.
# runner.test: `1*2` times.
target_called_times = runner.max_epochs * 2 + 4
self.assertEqual(mock_empty_cache.call_count, target_called_times)
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg.custom_hooks = [
dict(
type='EmptyCacheHook', after_iter=True, before_epoch=True)
]
runner = self.build_runner(cfg)
runner.train()
runner.val()
runner.test()
# Call `torch.cuda.empty_cache` after/before each epoch,
# after each iteration:
# runner.train: `max_epochs*2 + len(dataloader)*max_epochs` times. # noqa: E501
# runner.val: `1*2 + len(val_dataloader)` times.
            # runner.test: `1*2 + len(test_dataloader)` times.
target_called_times = \
runner.max_epochs * 2 + 4 + \
len(runner.train_dataloader) * runner.max_epochs + \
len(runner.val_dataloader) + \
len(runner.test_dataloader)
self.assertEqual(mock_empty_cache.call_count, target_called_times)
|
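A hedged worked example of the call-count arithmetic asserted above, using illustrative sizes (max_epochs = 2 and 4-batch train/val/test dataloaders).
# Hedged arithmetic for the three EmptyCacheHook configurations tested above.
max_epochs, n_train, n_val, n_test = 2, 4, 4, 4  # illustrative sizes

after_epoch_only = max_epochs + 2                        # default hook
before_and_after = max_epochs * 2 + 4                    # before_epoch=True
with_after_iter = (max_epochs * 2 + 4
                   + n_train * max_epochs + n_val + n_test)  # plus after_iter=True
print(after_epoch_only, before_and_after, with_after_iter)  # 4 8 24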
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
from mmengine.testing import RunnerTestCase
class TestEmptyCacheHook(RunnerTestCase):
def test_with_runner(self):
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg = self.epoch_based_cfg
cfg.custom_hooks = [dict(type='EmptyCacheHook')]
cfg.train_cfg.val_interval = 1e6 # disable validation during training # noqa: E501
runner = self.build_runner(cfg)
runner.train()
runner.test()
runner.val()
# Call `torch.cuda.empty_cache` after each epoch:
# runner.train: `max_epochs` times.
# runner.val: `1` time.
# runner.test: `1` time.
target_called_times = runner.max_epochs + 2
self.assertEqual(mock_empty_cache.call_count, target_called_times)
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg.custom_hooks = [dict(type='EmptyCacheHook', before_epoch=True)]
runner = self.build_runner(cfg)
runner.train()
runner.val()
runner.test()
# Call `torch.cuda.empty_cache` after/before each epoch:
# runner.train: `max_epochs*2` times.
# runner.val: `1*2` times.
# runner.test: `1*2` times.
target_called_times = runner.max_epochs * 2 + 4
self.assertEqual(mock_empty_cache.call_count, target_called_times)
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg.custom_hooks = [
dict(
type='EmptyCacheHook', after_iter=True, before_epoch=True)
]
runner = self.build_runner(cfg)
runner.train()
runner.val()
runner.test()
# Call `torch.cuda.empty_cache` after/before each epoch,
# after each iteration:
# runner.train: `max_epochs*2 + len(dataloader)*max_epochs` times. # noqa: E501
# runner.val: `1*2 + len(val_dataloader)` times.
            # runner.test: `1*2 + len(test_dataloader)` times.
target_called_times = \
runner.max_epochs * 2 + 4 + \
len(runner.train_dataloader) * runner.max_epochs + \
len(runner.val_dataloader) + \
len(runner.test_dataloader)
self.assertEqual(mock_empty_cache.call_count, target_called_times)
|
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict
if TYPE_CHECKING: # pragma: no cover
import numpy as np
from docarray.typing import ArrayType
from docarray import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
filter: Optional[Dict] = None,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
on: Optional[str] = None,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
For the purpose of evaluation, one can also directly use the
:meth:`~docarray.array.mixins.evaluation.EvaluationMixin.embed_and_evaluate`
function.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can use other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use scipy metric, please set ``use_scipy=True``.
- To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert the distance as score and make all values in range [0, 1],
use ``dA.match(dB, normalization=(1, 0))``. Note, how ``normalization`` differs from the previous.
- If a custom metric distance is provided. Make sure that it returns scores as distances and not similarity, meaning the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
            elements. When `darray` is big, this can significantly speed up the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param filter: filter query used for pre-filtering
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param on: specifies a subindex to search on. If set, the returned DocumentArray will be retrieved from the given subindex.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
filter=filter,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
on=on,
**kwargs,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
|
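A hedged usage sketch of .match() as documented above, assuming the v1-style docarray package and numpy are installed. With normalization=(1, 0) the cosine distances are rescaled so that higher values mean closer matches; the score key mirrors the metric name.
# Hedged sketch: match a small query set against an index by embedding.
import numpy as np
from docarray import Document, DocumentArray

queries = DocumentArray(Document(embedding=np.random.rand(16)) for _ in range(3))
index = DocumentArray(Document(embedding=np.random.rand(16)) for _ in range(10))

queries.match(index, metric='cosine', limit=5, normalization=(1, 0))
for q in queries:
    print(len(q.matches), q.matches[0].scores['cosine'].value)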
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict
if TYPE_CHECKING: # pragma: no cover
import numpy as np
from docarray.typing import ArrayType
from docarray import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
filter: Optional[Dict] = None,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
on: Optional[str] = None,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can use other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use scipy metric, please set ``use_scipy=True``.
- To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert the distance as score and make all values in range [0, 1],
use ``dA.match(dB, normalization=(1, 0))``. Note, how ``normalization`` differs from the previous.
- If a custom metric distance is provided. Make sure that it returns scores as distances and not similarity, meaning the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
            elements. When `darray` is big, this can significantly speed up the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param filter: filter query used for pre-filtering
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param on: specifies a subindex to search on. If set, the returned DocumentArray will be retrieved from the given subindex.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
filter=filter,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
on=on,
**kwargs,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
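# Hedged usage sketch for `.match()`: the array sizes and embedding dimension below
# are illustrative assumptions, not anything defined by this module.
def _match_usage_example():  # pragma: no cover - illustrative only
    import numpy as np
    from docarray import DocumentArray
    queries = DocumentArray.empty(3)
    index = DocumentArray.empty(10)
    queries.embeddings = np.random.random([3, 5]).astype('float32')
    index.embeddings = np.random.random([10, 5]).astype('float32')
    # keep at most 5 matches per query and invert distances into [0, 1] scores
    queries.match(index, metric='cosine', limit=5, normalization=(1, 0))
    for d in queries:
        assert len(d.matches) <= 5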
|
"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
def _get_client(
api_key: Optional[str] = None,
api_url: Optional[str] = None,
) -> Any:
try:
from langsmith import Client as LangSmithClient
ls_client = LangSmithClient(api_url, api_key=api_key)
if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"):
return ls_client
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError:
try:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError as e:
msg = (
"Could not import langsmith or langchainhub (deprecated),"
"please install with `pip install langsmith`."
)
raise ImportError(msg) from e
def push(
repo_full_name: str,
object: Any, # noqa: A002
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = None,
new_repo_is_public: bool = False,
new_repo_description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
) -> str:
"""
Push an object to the hub and return the URL at which it can be viewed in a browser.
:param repo_full_name: The full name of the prompt to push to in the format of
`owner/prompt_name` or `prompt_name`.
:param object: The LangChain object to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the prompt should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the prompt. Defaults to an empty
string.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "push_prompt"):
return client.push_prompt(
repo_full_name,
object=object,
parent_commit_hash=parent_commit_hash,
is_public=new_repo_is_public,
description=new_repo_description,
readme=readme,
tags=tags,
)
# Then it's langchainhub
manifest_json = dumps(object)
return client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
def pull(
owner_repo_commit: str,
*,
include_model: Optional[bool] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pull an object from the hub and return it as a LangChain object.
:param owner_repo_commit: The full name of the prompt to pull from in the format of
`owner/prompt_name:commit_hash` or `owner/prompt_name`
or just `prompt_name` if it's your own prompt.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "pull_prompt"):
return client.pull_prompt(owner_repo_commit, include_model=include_model)
# Then it's langchainhub
if hasattr(client, "pull_repo"):
# >= 0.1.15
res_dict = client.pull_repo(owner_repo_commit)
obj = loads(json.dumps(res_dict["manifest"]))
if isinstance(obj, BasePromptTemplate):
if obj.metadata is None:
obj.metadata = {}
obj.metadata["lc_hub_owner"] = res_dict["owner"]
obj.metadata["lc_hub_repo"] = res_dict["repo"]
obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"]
return obj
# Then it's < 0.1.15 langchainhub
resp: str = client.pull(owner_repo_commit)
return loads(resp)
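# Hedged round-trip example for push/pull. The repo handle and the prompt object are
# illustrative assumptions; authentication is taken from the environment as usual.
def _hub_usage_example():  # pragma: no cover - illustrative only
    from langchain_core.prompts import ChatPromptTemplate
    prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
    url = push("my-handle/joke-prompt", prompt)  # URL where the prompt can be viewed
    pulled = pull("my-handle/joke-prompt")
    assert isinstance(pulled, ChatPromptTemplate)
    return url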
|
"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
def _get_client(
api_key: Optional[str] = None,
api_url: Optional[str] = None,
) -> Any:
try:
from langsmith import Client as LangSmithClient
ls_client = LangSmithClient(api_url, api_key=api_key)
if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"):
return ls_client
else:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError:
try:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError as e:
msg = (
"Could not import langsmith or langchainhub (deprecated),"
"please install with `pip install langsmith`."
)
raise ImportError(msg) from e
def push(
repo_full_name: str,
object: Any, # noqa: A002
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = None,
new_repo_is_public: bool = False,
new_repo_description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
) -> str:
"""
Push an object to the hub and return the URL at which it can be viewed in a browser.
:param repo_full_name: The full name of the prompt to push to in the format of
`owner/prompt_name` or `prompt_name`.
:param object: The LangChain object to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the prompt should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the prompt. Defaults to an empty
string.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "push_prompt"):
return client.push_prompt(
repo_full_name,
object=object,
parent_commit_hash=parent_commit_hash,
is_public=new_repo_is_public,
description=new_repo_description,
readme=readme,
tags=tags,
)
# Then it's langchainhub
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
include_model: Optional[bool] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pull an object from the hub and return it as a LangChain object.
:param owner_repo_commit: The full name of the prompt to pull from in the format of
`owner/prompt_name:commit_hash` or `owner/prompt_name`
or just `prompt_name` if it's your own prompt.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "pull_prompt"):
response = client.pull_prompt(owner_repo_commit, include_model=include_model)
return response
# Then it's langchainhub
if hasattr(client, "pull_repo"):
# >= 0.1.15
res_dict = client.pull_repo(owner_repo_commit)
obj = loads(json.dumps(res_dict["manifest"]))
if isinstance(obj, BasePromptTemplate):
if obj.metadata is None:
obj.metadata = {}
obj.metadata["lc_hub_owner"] = res_dict["owner"]
obj.metadata["lc_hub_repo"] = res_dict["repo"]
obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"]
return obj
# Then it's < 0.1.15 langchainhub
resp: str = client.pull(owner_repo_commit)
return loads(resp)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras.preprocessing import image as image
from keras._tf_keras.keras.preprocessing import sequence as sequence
from keras._tf_keras.keras.preprocessing import text as text
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_dataset_from_directory,
)
from keras.src.utils.text_dataset_utils import (
text_dataset_from_directory as text_dataset_from_directory,
)
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array as timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api._tf_keras.keras.preprocessing import image
from keras.api._tf_keras.keras.preprocessing import sequence
from keras.api._tf_keras.keras.preprocessing import text
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""Tests using Scikit-Learn's bundled estimator_checks."""
from contextlib import contextmanager
import pytest
import sklearn
from packaging.version import parse as parse_version
from sklearn.utils.estimator_checks import parametrize_with_checks
import keras
from keras.src.backend import floatx
from keras.src.backend import set_floatx
from keras.src.layers import Dense
from keras.src.layers import Input
from keras.src.models import Model
from keras.src.wrappers import SKLearnClassifier
from keras.src.wrappers import SKLearnRegressor
from keras.src.wrappers import SKLearnTransformer
def wrapped_parametrize_with_checks(
estimators,
*,
legacy: bool = True,
expected_failed_checks=None,
):
"""Wrapped `parametrize_with_checks` handling backwards compat."""
sklearn_version = parse_version(
parse_version(sklearn.__version__).base_version
)
if sklearn_version >= parse_version("1.6"):
return parametrize_with_checks(
estimators,
legacy=legacy,
expected_failed_checks=expected_failed_checks,
)
def patched_more_tags(estimator, expected_failed_checks):
import copy
original_tags = copy.deepcopy(sklearn.utils._tags._safe_tags(estimator))
def patched_more_tags(self):
original_tags.update({"_xfail_checks": expected_failed_checks})
return original_tags
estimator.__class__._more_tags = patched_more_tags
return estimator
estimators = [
patched_more_tags(estimator, expected_failed_checks(estimator))
for estimator in estimators
]
# legacy is not supported and ignored
return parametrize_with_checks(estimators)
def dynamic_model(X, y, loss, layers=[10]):
"""Creates a basic MLP classifier dynamically choosing binary/multiclass
classification loss and output activations.
"""
n_features_in = X.shape[1]
inp = Input(shape=(n_features_in,))
hidden = inp
for layer_size in layers:
hidden = Dense(layer_size, activation="relu")(hidden)
n_outputs = y.shape[1] if len(y.shape) > 1 else 1
out = [Dense(n_outputs, activation="softmax")(hidden)]
model = Model(inp, out)
model.compile(loss=loss, optimizer="rmsprop")
return model
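# Hedged sketch of calling `dynamic_model` directly on toy arrays, outside the sklearn
# wrappers; the shapes, loss and layer sizes are illustrative assumptions.
def _dynamic_model_example():  # pragma: no cover - illustrative only
    import numpy as np
    X = np.random.random((8, 4)).astype("float32")
    y = np.zeros((8, 3), dtype="float32")
    y[np.arange(8), np.random.randint(0, 3, size=8)] = 1.0  # one-hot targets
    model = dynamic_model(X, y, loss="categorical_crossentropy", layers=[16, 8])
    assert int(model.outputs[0].shape[-1]) == 3
    return model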
@contextmanager
def use_floatx(x: str):
"""Context manager to temporarily
set the keras backend precision.
"""
_floatx = floatx()
set_floatx(x)
try:
yield
finally:
set_floatx(_floatx)
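# Hedged usage note for `use_floatx`: run a block under a different precision and
# restore the previous setting afterwards; "float64" is just an illustrative choice.
def _use_floatx_example():  # pragma: no cover - illustrative only
    with use_floatx("float64"):
        assert floatx() == "float64"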
EXPECTED_FAILED_CHECKS = {
"SKLearnClassifier": {
"check_classifiers_regression_target": "not an issue in sklearn>=1.6",
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
"check_classifiers_one_label_sample_weights": (
"0 sample weight is not ignored"
),
"check_classifiers_classes": (
"with small test cases the estimator returns not all classes "
"sometimes"
),
"check_classifier_data_not_an_array": (
"This test assumes reproducibility in fit."
),
"check_supervised_y_2d": "This test assumes reproducibility in fit.",
"check_fit_idempotent": "This test assumes reproducibility in fit.",
},
"SKLearnRegressor": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
"SKLearnTransformer": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
}
@wrapped_parametrize_with_checks(
estimators=[
SKLearnClassifier(
model=dynamic_model,
model_kwargs={
"loss": "categorical_crossentropy",
"layers": [20, 20, 20],
},
fit_kwargs={"epochs": 5},
),
SKLearnRegressor(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
SKLearnTransformer(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
],
expected_failed_checks=lambda estimator: EXPECTED_FAILED_CHECKS[
type(estimator).__name__
],
)
def test_sklearn_estimator_checks(estimator, check):
"""Checks that can be passed with sklearn's default tolerances
and in a single epoch.
"""
try:
check(estimator)
except Exception as exc:
if keras.config.backend() in ["numpy", "openvino"] and (
isinstance(exc, NotImplementedError)
or "NotImplementedError" in str(exc)
):
pytest.xfail("Backend not implemented")
else:
raise
|
"""Tests using Scikit-Learn's bundled estimator_checks."""
from contextlib import contextmanager
import pytest
import keras
from keras.src.backend import floatx
from keras.src.backend import set_floatx
from keras.src.layers import Dense
from keras.src.layers import Input
from keras.src.models import Model
from keras.src.wrappers import SKLearnClassifier
from keras.src.wrappers import SKLearnRegressor
from keras.src.wrappers import SKLearnTransformer
from keras.src.wrappers.fixes import parametrize_with_checks
def dynamic_model(X, y, loss, layers=[10]):
"""Creates a basic MLP classifier dynamically choosing binary/multiclass
classification loss and output activations.
"""
n_features_in = X.shape[1]
inp = Input(shape=(n_features_in,))
hidden = inp
for layer_size in layers:
hidden = Dense(layer_size, activation="relu")(hidden)
n_outputs = y.shape[1] if len(y.shape) > 1 else 1
out = [Dense(n_outputs, activation="softmax")(hidden)]
model = Model(inp, out)
model.compile(loss=loss, optimizer="rmsprop")
return model
@contextmanager
def use_floatx(x: str):
"""Context manager to temporarily
set the keras backend precision.
"""
_floatx = floatx()
set_floatx(x)
try:
yield
finally:
set_floatx(_floatx)
EXPECTED_FAILED_CHECKS = {
"SKLearnClassifier": {
"check_classifiers_regression_target": "not an issue in sklearn>=1.6",
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
"check_classifiers_one_label_sample_weights": (
"0 sample weight is not ignored"
),
"check_classifiers_classes": (
"with small test cases the estimator returns not all classes "
"sometimes"
),
"check_classifier_data_not_an_array": (
"This test assumes reproducibility in fit."
),
"check_supervised_y_2d": "This test assumes reproducibility in fit.",
"check_fit_idempotent": "This test assumes reproducibility in fit.",
},
"SKLearnRegressor": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
"SKLearnTransformer": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
}
@parametrize_with_checks(
estimators=[
SKLearnClassifier(
model=dynamic_model,
model_kwargs={
"loss": "categorical_crossentropy",
"layers": [20, 20, 20],
},
fit_kwargs={"epochs": 5},
),
SKLearnRegressor(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
SKLearnTransformer(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
],
expected_failed_checks=lambda estimator: EXPECTED_FAILED_CHECKS[
type(estimator).__name__
],
)
def test_sklearn_estimator_checks(estimator, check):
"""Checks that can be passed with sklearn's default tolerances
and in a single epoch.
"""
try:
check(estimator)
except Exception as exc:
if keras.config.backend() in ["numpy", "openvino"] and (
isinstance(exc, NotImplementedError)
or "NotImplementedError" in str(exc)
):
pytest.xfail("Backend not implemented")
else:
raise
|
"""Test OllamaLLM llm."""
from langchain_core.runnables import RunnableConfig
from langchain_ollama.llms import OllamaLLM
MODEL_NAME = "llama3.1"
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
llm = OllamaLLM(model=MODEL_NAME)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
llm = OllamaLLM(model=MODEL_NAME)
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_abatch() -> None:
"""Test streaming tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
def test_batch() -> None:
"""Test batch tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = await llm.ainvoke("I'm Pickle Rick", config=RunnableConfig(tags=["foo"]))
assert isinstance(result, str)
def test_invoke() -> None:
"""Test invoke tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = llm.invoke("I'm Pickle Rick", config=RunnableConfig(tags=["foo"]))
assert isinstance(result, str)
|
"""Test OllamaLLM llm."""
from langchain_ollama.llms import OllamaLLM
MODEL_NAME = "llama3"
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
llm = OllamaLLM(model=MODEL_NAME)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
llm = OllamaLLM(model=MODEL_NAME)
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_abatch() -> None:
"""Test streaming tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
def test_batch() -> None:
"""Test batch tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
def test_invoke() -> None:
"""Test invoke tokens from OllamaLLM."""
llm = OllamaLLM(model=MODEL_NAME)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result, str)
|
from docarray.base_doc.any_doc import AnyDoc
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
__all__ = ['AnyDoc', 'BaseDoc', 'BaseNode']
def __getattr__(name: str):
if name == 'DocArrayResponse':
import_library('fastapi', raise_error=True)
from docarray.base_doc.docarray_response import DocArrayResponse
if name not in __all__:
__all__.append(name)
return DocArrayResponse
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from docarray.base_doc.any_doc import AnyDoc
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
__all__ = ['AnyDoc', 'BaseDoc', 'BaseNode']
def __getattr__(name: str):
if name == 'DocResponse':
import_library('fastapi', raise_error=True)
from docarray.base_doc.doc_response import DocResponse
if name not in __all__:
__all__.append(name)
return DocResponse
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
## under jina root dir
# python scripts/get-last-release-note.py
## result in root/tmp.md
with open('CHANGELOG.md', encoding='utf-8') as fp:
n = []
for v in fp:
if v.startswith('## Release Note'):
n.clear()
n.append(v)
with open('tmp.md', 'w', encoding='utf-8') as fp:
fp.writelines(n)
|
## under jina root dir
# python scripts/get-last-release-note.py
## result in root/tmp.md
with open('CHANGELOG.md') as fp:
n = []
for v in fp:
if v.startswith('## Release Note'):
n.clear()
n.append(v)
with open('tmp.md', 'w') as fp:
fp.writelines(n)
|
__version__ = '0.15.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.12'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from typing import Optional, TYPE_CHECKING, TypeVar, Type, Union, Any
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
):
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_valid_text_extension = ext in TEXT_FILE_FORMATS or ext == ''
if not has_valid_text_extension:
raise ValueError('Text URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
from typing import Optional
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.3'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information containing the major, minor and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
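# Hedged illustration of the parsing behaviour; the version strings are examples only.
def _parse_version_example():  # pragma: no cover - illustrative only
    assert parse_version_info('0.8.3') == (0, 8, 3)
    assert parse_version_info('1.0.0rc1') == (1, 0, 0, 'rc1')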
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information containing the major, minor and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = './faster-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
trans.append(transforms.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
trans.append(transforms.PILToTensor())
trans.extend(
[
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
trans.append(transforms.RandomErasing(p=random_erase_prob))
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans += [
transforms.Resize(resize_size, interpolation=interpolation, antialias=True),
transforms.CenterCrop(crop_size),
]
if backend == "pil":
trans.append(transforms.PILToTensor())
trans += [
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
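# Hedged usage sketch of the two presets on a toy PIL image; the image size, crop size
# and the "ra" auto-augment policy are illustrative choices, not defaults of this file.
def _presets_example():  # pragma: no cover - illustrative only
    from PIL import Image
    img = Image.new("RGB", (300, 300))
    train_tf = ClassificationPresetTrain(crop_size=224, auto_augment_policy="ra")
    eval_tf = ClassificationPresetEval(crop_size=224, resize_size=256)
    assert tuple(train_tf(img).shape) == tuple(eval_tf(img).shape) == (3, 224, 224)
    return train_tf, eval_tf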
|
import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
):
trans = [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
if hflip_prob > 0:
trans.append(transforms.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))
trans.extend(
[
transforms.PILToTensor(),
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
trans.append(transforms.RandomErasing(p=random_erase_prob))
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
):
self.transforms = transforms.Compose(
[
transforms.Resize(resize_size, interpolation=interpolation),
transforms.CenterCrop(crop_size),
transforms.PILToTensor(),
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
def __call__(self, img):
return self.transforms(img)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist, track_parallel_progress
prog_description = '''K-Fold coco split.
To split coco data for semi-supervised object detection:
python tools/misc/split_coco.py
'''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-root',
type=str,
help='The data root of coco dataset.',
default='./data/coco/')
parser.add_argument(
'--out-dir',
type=str,
help='The output directory of coco semi-supervised annotations.',
default='./data/coco/semi_anns/')
parser.add_argument(
'--labeled-percent',
type=float,
nargs='+',
help='The percentage of labeled data in the training set.',
default=[1, 2, 5, 10])
parser.add_argument(
'--fold',
type=int,
help='K-fold cross validation for semi-supervised object detection.',
default=5)
args = parser.parse_args()
return args
def split_coco(data_root, out_dir, percent, fold):
"""Split COCO data for Semi-supervised object detection.
Args:
data_root (str): The data root of coco dataset.
out_dir (str): The output directory of coco semi-supervised
annotations.
percent (float): The percentage of labeled data in the training set.
fold (int): The fold of dataset and set as random seed for data split.
"""
def save_anns(name, images, annotations):
sub_anns = dict()
sub_anns['images'] = images
sub_anns['annotations'] = annotations
sub_anns['licenses'] = anns['licenses']
sub_anns['categories'] = anns['categories']
sub_anns['info'] = anns['info']
mkdir_or_exist(out_dir)
dump(sub_anns, f'{out_dir}/{name}.json')
# set random seed with the fold
np.random.seed(fold)
ann_file = osp.join(data_root, 'annotations/instances_train2017.json')
anns = load(ann_file)
image_list = anns['images']
labeled_total = int(percent / 100. * len(image_list))
labeled_inds = set(
np.random.choice(range(len(image_list)), size=labeled_total))
labeled_ids, labeled_images, unlabeled_images = [], [], []
for i in range(len(image_list)):
if i in labeled_inds:
labeled_images.append(image_list[i])
labeled_ids.append(image_list[i]['id'])
else:
unlabeled_images.append(image_list[i])
# get all annotations of labeled images
labeled_ids = set(labeled_ids)
labeled_annotations, unlabeled_annotations = [], []
for ann in anns['annotations']:
if ann['image_id'] in labeled_ids:
labeled_annotations.append(ann)
else:
unlabeled_annotations.append(ann)
# save labeled and unlabeled
labeled_name = f'instances_train2017.{fold}@{percent}'
unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled'
save_anns(labeled_name, labeled_images, labeled_annotations)
save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations)
def multi_wrapper(args):
return split_coco(*args)
if __name__ == '__main__':
args = parse_args()
arguments_list = [(args.data_root, args.out_dir, p, f)
for f in range(1, args.fold + 1)
for p in args.labeled_percent]
track_parallel_progress(multi_wrapper, arguments_list, args.fold)
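# Hedged illustration of a single direct call; the paths are assumptions and the COCO
# annotations must already exist under `data_root` for this to run.
def _split_coco_example():  # pragma: no cover - illustrative only
    split_coco(
        data_root='./data/coco/',
        out_dir='./data/coco/semi_anns/',
        percent=10,
        fold=1,
    )
    # writes instances_train2017.1@10.json and instances_train2017.1@10-unlabeled.json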
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist, track_parallel_progress
prog_description = '''K-Fold coco split.
To split coco data for semi-supervised object detection:
python tools/misc/split_coco.py
'''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-root',
type=str,
help='The data root of coco dataset.',
default='./data/coco/')
parser.add_argument(
'--out-dir',
type=str,
help='The output directory of coco semi-supervised annotations.',
default='./data/coco_semi_annos/')
parser.add_argument(
'--labeled-percent',
type=float,
nargs='+',
help='The percentage of labeled data in the training set.',
default=[1, 2, 5, 10])
parser.add_argument(
'--fold',
type=int,
help='K-fold cross validation for semi-supervised object detection.',
default=5)
args = parser.parse_args()
return args
def split_coco(data_root, out_dir, percent, fold):
"""Split COCO data for Semi-supervised object detection.
Args:
data_root (str): The data root of coco dataset.
out_dir (str): The output directory of coco semi-supervised
annotations.
percent (float): The percentage of labeled data in the training set.
fold (int): The fold of dataset and set as random seed for data split.
"""
def save_anns(name, images, annotations):
sub_anns = dict()
sub_anns['images'] = images
sub_anns['annotations'] = annotations
sub_anns['licenses'] = anns['licenses']
sub_anns['categories'] = anns['categories']
sub_anns['info'] = anns['info']
mkdir_or_exist(out_dir)
dump(sub_anns, f'{out_dir}/{name}.json')
# set random seed with the fold
np.random.seed(fold)
ann_file = osp.join(data_root, 'annotations/instances_train2017.json')
anns = load(ann_file)
image_list = anns['images']
labeled_total = int(percent / 100. * len(image_list))
labeled_inds = set(
np.random.choice(range(len(image_list)), size=labeled_total))
labeled_ids, labeled_images, unlabeled_images = [], [], []
for i in range(len(image_list)):
if i in labeled_inds:
labeled_images.append(image_list[i])
labeled_ids.append(image_list[i]['id'])
else:
unlabeled_images.append(image_list[i])
# get all annotations of labeled images
labeled_ids = set(labeled_ids)
labeled_annotations, unlabeled_annotations = [], []
for ann in anns['annotations']:
if ann['image_id'] in labeled_ids:
labeled_annotations.append(ann)
else:
unlabeled_annotations.append(ann)
# save labeled and unlabeled
labeled_name = f'instances_train2017.{fold}@{percent}'
unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled'
save_anns(labeled_name, labeled_images, labeled_annotations)
save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations)
def multi_wrapper(args):
return split_coco(*args)
if __name__ == '__main__':
args = parse_args()
arguments_list = [(args.data_root, args.out_dir, p, f)
for f in range(1, args.fold + 1)
for p in args.labeled_percent]
track_parallel_progress(multi_wrapper, arguments_list, args.fold)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray, Flow, requests
from jina.executors import BaseExecutor
from ...match_merger import MatchMerger
class MockShard(BaseExecutor):
@requests
def search(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.matches.append(Document(tags={'shard_id': self.runtime_args.pea_id}))
@pytest.fixture
def docs():
return [Document(text=f'sample text {i}') for i in range(2)]
@pytest.mark.parametrize('shards', (1, 3, 5))
def test_match_merger(docs, shards):
with Flow().add(
uses=MockShard, uses_after=MatchMerger, shards=shards, polling='all'
) as f:
documents = f.search(docs, return_results=True)[0].docs
assert len(documents) == 2
for doc in documents:
assert {d.tags['shard_id'] for d in doc.matches} == {
float(i) for i in range(shards)
}
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Flow, Document, requests, DocumentArray
from jina.executors import BaseExecutor
from ...match_merger import MatchMerger
class MockShard(BaseExecutor):
@requests
def search(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.matches.append(Document(tags={'shard_id': self.runtime_args.pea_id}))
@pytest.fixture
def docs():
return [Document(text=f'sample text {i}') for i in range(2)]
@pytest.mark.parametrize('shards', (1, 3, 5))
def test_match_merger(docs, shards):
with Flow().add(
uses=MockShard,
uses_after=MatchMerger,
shards=shards,
polling='all'
) as f:
documents = f.search(docs, return_results=True)[0].docs
assert len(documents) == 2
for doc in documents:
assert {d.tags['shard_id'] for d in doc.matches} == {float(i) for i in range(shards)}
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def data_generator(test_dir: str):
def _generator():
data_file_path = os.path.join(test_dir, 'data', 'test_data.txt')
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray(
[Document(chunks=[Document(text='hello world') for _ in range(10)])]
)
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(chunks=[Document(text='hello world') for _ in range(10)])
]
)
]
)
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope='session')
def build_docker_image() -> str:
img_name = Path(__file__).parents[1].stem.lower()
subprocess.run(['docker', 'build', '-t', img_name, '.'], check=True)
return img_name
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def data_generator(test_dir: str):
def _generator():
data_file_path = os.path.join(test_dir, 'data', 'test_data.txt')
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray(
[Document(chunks=[Document(text='hello world') for _ in range(10)])]
)
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(chunks=[Document(text='hello world') for _ in range(10)])
]
)
]
)
|
import random
import numpy as np
import pytest
from jina import Document, DocumentArray
from ..catboost_ranker import CatboostRanker
NUM_DOCS = 1000
NUM_MATCHES = 5
@pytest.fixture
def ranker():
return CatboostRanker(
query_features=['brand', 'price'],
match_features=['brand', 'price'],
relevance_label='relevance',
)
@pytest.fixture
def ranker_with_weight():
return CatboostRanker(
query_features=['brand', 'price'],
match_features=['brand', 'price'],
relevance_label='relevance',
weight='weight',
)
@pytest.fixture
def relevances():
return np.random.uniform(0, 1, [1, NUM_DOCS]).flatten()
@pytest.fixture
def documents_to_train_stub_model(relevances):
"""features: color, brand, price. Label relevance"""
# initial stub model, relevance purely dependent on brand, not price.
# brand relevance 5 > 4 > 3 > 2 > 1.
da = DocumentArray()
bins = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
inds = np.digitize(relevances, bins)
for brand, relevance in zip(inds, relevances):
doc = Document(
tags={
'brand': int(brand),
'price': random.randint(50, 200),
'weight': random.uniform(0, 1),
}
)
for _ in range(NUM_MATCHES):
# each match has an extra relevance field that indicates its score.
doc.matches.append(
Document(
tags={
'brand': int(brand),
'price': random.randint(50, 200),
'relevance': float(relevance),
}
)
)
da.append(doc)
return da
@pytest.fixture
def documents_to_train_price_sensitive_model():
"""features: color, brand, price. Label relevance"""
# price sensitive, relevance based on pure price, cheaper relevance higher.
da = DocumentArray()
for _ in range(NUM_DOCS):
root = Document(tags={'price': random.randint(200, 500), 'brand': 1})
for _ in range(NUM_MATCHES):
root_price = root.tags['price']
root.matches.extend(
[
Document(
tags={'price': root_price - 100, 'brand': 3, 'relevance': 0.8}
),
Document(tags={'price': root_price, 'brand': 3, 'relevance': 0.6}),
Document(
tags={'price': root_price + 100, 'brand': 3, 'relevance': 0.4}
),
Document(
tags={'price': root_price + 200, 'brand': 3, 'relevance': 0.2}
),
]
)
da.append(root)
return da
@pytest.fixture
def documents_without_label_random_price():
"""features: color, brand, price. Label relevance"""
# expect 5 > 3 > 1
# expect price
da = DocumentArray()
d1 = Document(tags={'brand': random.randint(0, 5), 'price': 200})
d1.matches.append(Document(tags={'brand': random.randint(0, 5), 'price': 196}))
d1.matches.append(Document(tags={'brand': random.randint(0, 5), 'price': 100}))
d1.matches.append(Document(tags={'brand': random.randint(0, 5), 'price': 50}))
da.append(d1)
return da
@pytest.fixture
def documents_without_label_random_brand():
"""features: color, brand, price. Label relevance"""
# expect price
da = DocumentArray()
d1 = Document(tags={'brand': 2, 'price': 200})
d1.matches.append(Document(id=1, tags={'brand': 2, 'price': 405}))
d1.matches.append(Document(id=2, tags={'brand': 2, 'price': 305}))
d1.matches.append(Document(id=3, tags={'brand': 2, 'price': 96}))
d1.matches.append(Document(id=4, tags={'brand': 2, 'price': 200}))
da.append(d1)
return da
|
import random
import pytest
import numpy as np
from jina import Document, DocumentArray
from ..catboost_ranker import CatboostRanker
NUM_DOCS = 1000
NUM_MATCHES = 5
@pytest.fixture
def ranker():
return CatboostRanker(
query_features=['brand', 'price'],
match_features=['brand', 'price'],
relevance_label='relevance',
)
@pytest.fixture
def ranker_with_weight():
return CatboostRanker(
query_features=['brand', 'price'],
match_features=['brand', 'price'],
relevance_label='relevance',
weight='weight',
)
@pytest.fixture
def relevances():
return np.random.uniform(0, 1, [1, NUM_DOCS]).flatten()
@pytest.fixture
def documents_to_train_stub_model(relevances):
"""features: color, brand, price. Label relevance"""
# initial stub model, relevance purely dependent on brand, not price.
# brand relevance 5 > 4 > 3 > 2 > 1.
da = DocumentArray()
bins = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
inds = np.digitize(relevances, bins)
for brand, relevance in zip(inds, relevances):
doc = Document(
tags={
'brand': int(brand),
'price': random.randint(50, 200),
'weight': random.uniform(0, 1),
}
)
for _ in range(NUM_MATCHES):
# each match has an extra relevance field that indicates its score.
doc.matches.append(
Document(
tags={
'brand': int(brand),
'price': random.randint(50, 200),
'relevance': float(relevance),
}
)
)
da.append(doc)
return da
@pytest.fixture
def documents_to_train_price_sensitive_model():
"""features: color, brand, price. Label relevance"""
# price sensitive, relevance based on pure price, cheaper relevance higher.
da = DocumentArray()
for _ in range(NUM_DOCS):
root = Document(tags={'price': random.randint(200, 500), 'brand': 1})
for _ in range(NUM_MATCHES):
root_price = root.tags['price']
root.matches.extend(
[
Document(
tags={'price': root_price - 100, 'brand': 3, 'relevance': 0.8}
),
Document(tags={'price': root_price, 'brand': 3, 'relevance': 0.6}),
Document(
tags={'price': root_price + 100, 'brand': 3, 'relevance': 0.4}
),
Document(
tags={'price': root_price + 200, 'brand': 3, 'relevance': 0.2}
),
]
)
da.append(root)
return da
@pytest.fixture
def documents_without_label_random_price():
"""features: color, brand, price. Label relevance"""
# expect 5 > 3 > 1
# expect price
da = DocumentArray()
d1 = Document(tags={'brand': random.randint(0, 5), 'price': 200})
d1.matches.append(Document(tags={'brand': random.randint(0, 5), 'price': 196}))
d1.matches.append(Document(tags={'brand': random.randint(0, 5), 'price': 100}))
d1.matches.append(Document(tags={'brand': random.randint(0, 5), 'price': 50}))
da.append(d1)
return da
@pytest.fixture
def documents_without_label_random_brand():
"""features: color, brand, price. Label relevance"""
# expect price
da = DocumentArray()
d1 = Document(tags={'brand': 2, 'price': 200})
d1.matches.append(Document(id=1, tags={'brand': 2, 'price': 405}))
d1.matches.append(Document(id=2, tags={'brand': 2, 'price': 305}))
d1.matches.append(Document(id=3, tags={'brand': 2, 'price': 96}))
d1.matches.append(Document(id=4, tags={'brand': 2, 'price': 200}))
da.append(d1)
return da
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.opensearch import DocumentArrayOpenSearch
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.opensearch import OpenSearchConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
from docarray.array.milvus import DocumentArrayMilvus, MilvusConfig
N = 100
def da_and_dam():
da = DocumentArray.empty(N)
dasq = DocumentArraySqlite.empty(N)
return (da, dasq)
@pytest.fixture
def docs():
yield (Document(text=str(j)) for j in range(100))
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayOpenSearch, OpenSearchConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=1)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
def test_iter_len_bool(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
j = 0
for _ in da:
j += 1
assert j == N
assert j == len(da)
assert da
da.clear()
assert not da
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayOpenSearch, OpenSearchConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
def test_repr(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
assert f'length={N}' in repr(da)
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('sqlite', None),
('annlite', AnnliteConfig(n_dim=128)),
('weaviate', WeaviateConfig(n_dim=128)),
('qdrant', QdrantConfig(n_dim=128)),
('elasticsearch', ElasticConfig(n_dim=128)),
('opensearch', OpenSearchConfig(n_dim=128)),
('redis', RedisConfig(n_dim=128)),
('milvus', MilvusConfig(n_dim=128)),
],
)
def test_repr_str(docs, storage, config, start_storage):
if config:
da = DocumentArray(docs, storage=storage, config=config)
else:
da = DocumentArray(docs, storage=storage)
da.summary()
assert da
da.clear()
assert not da
print(da)
@pytest.mark.parametrize(
'da_cls, config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=10)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=10)),
(DocumentArrayQdrant, QdrantConfig(n_dim=10)),
(DocumentArrayElastic, ElasticConfig(n_dim=10)),
(DocumentArrayOpenSearch, OpenSearchConfig(n_dim=10)),
(DocumentArrayRedis, RedisConfig(n_dim=10)),
(DocumentArrayMilvus, MilvusConfig(n_dim=10)),
],
)
def test_iadd(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
oid = id(da)
dap = DocumentArray.empty(10)
da += dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid == oid
@pytest.mark.parametrize('da', [da_and_dam()[0]])
def test_add(da):
oid = id(da)
dap = DocumentArray.empty(10)
da = da + dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid != oid
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
from docarray.array.milvus import DocumentArrayMilvus, MilvusConfig
N = 100
def da_and_dam():
da = DocumentArray.empty(N)
dasq = DocumentArraySqlite.empty(N)
return (da, dasq)
@pytest.fixture
def docs():
yield (Document(text=str(j)) for j in range(100))
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=1)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
def test_iter_len_bool(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
j = 0
for _ in da:
j += 1
assert j == N
assert j == len(da)
assert da
da.clear()
assert not da
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
def test_repr(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
assert f'length={N}' in repr(da)
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('sqlite', None),
('annlite', AnnliteConfig(n_dim=128)),
('weaviate', WeaviateConfig(n_dim=128)),
('qdrant', QdrantConfig(n_dim=128)),
('elasticsearch', ElasticConfig(n_dim=128)),
('redis', RedisConfig(n_dim=128)),
('milvus', MilvusConfig(n_dim=128)),
],
)
def test_repr_str(docs, storage, config, start_storage):
if config:
da = DocumentArray(docs, storage=storage, config=config)
else:
da = DocumentArray(docs, storage=storage)
da.summary()
assert da
da.clear()
assert not da
print(da)
@pytest.mark.parametrize(
'da_cls, config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=10)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=10)),
(DocumentArrayQdrant, QdrantConfig(n_dim=10)),
(DocumentArrayElastic, ElasticConfig(n_dim=10)),
(DocumentArrayRedis, RedisConfig(n_dim=10)),
(DocumentArrayMilvus, MilvusConfig(n_dim=10)),
],
)
def test_iadd(da_cls, config, start_storage):
if config:
da = da_cls.empty(N, config=config)
else:
da = da_cls.empty(N)
oid = id(da)
dap = DocumentArray.empty(10)
da += dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid == oid
@pytest.mark.parametrize('da', [da_and_dam()[0]])
def test_add(da):
oid = id(da)
dap = DocumentArray.empty(10)
da = da + dap
assert len(da) == N + len(dap)
nid = id(da)
assert nid != oid
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import DocumentArray, Executor, requests
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
class FaissLMDBSearcher(Executor):
"""
    Expects input `Document`s with `.embedding` of the same shape as the `Documents` stored in the
    `FaissSearcher`. The ids of the `Documents` stored in `FaissSearcher` need to
    exist in the `LMDBStorage`. Otherwise, you will not get back the original metadata.
    The `FaissSearcher` attaches matches to the `Documents` sent as inputs, with the id of
    the match, and its embedding. Then, the `LMDBStorage` retrieves the full metadata
(original text or image blob) and attaches those to the `Document`. You receive back
the full `Document`.
"""
def __init__(self, dump_path=None, *args, **kwargs):
"""
:param dump_path: dump path
"""
super().__init__(*args, **kwargs)
self._vec_indexer = FaissSearcher(dump_path=dump_path, *args, **kwargs)
self._kv_indexer = LMDBStorage(dump_path=dump_path, *args, **kwargs)
@requests(on="/search")
def search(self, docs: "DocumentArray", parameters: Dict = None, **kwargs):
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
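        # append 'm' so the KV lookup traverses the matches that the vector search just attached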
kv_parameters["traversal_paths"] = [
path + "m" for path in kv_parameters.get("traversal_paths", ["r"])
]
self._kv_indexer.search(docs, parameters=kv_parameters)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import DocumentArray, Executor, requests
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
class FaissLMDBSearcher(Executor):
"""
    Expects input `Document`s with `.embedding` of the same shape as the `Documents` stored in the
    `FaissSearcher`. The ids of the `Documents` stored in `FaissSearcher` need to
    exist in the `LMDBStorage`. Otherwise, you will not get back the original metadata.
    The `FaissSearcher` attaches matches to the `Documents` sent as inputs, with the id of
    the match, and its embedding. Then, the `LMDBStorage` retrieves the full metadata
(original text or image blob) and attaches those to the `Document`. You receive back
the full `Document`.
"""
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._vec_indexer = FaissSearcher(dump_path=dump_path, *args, **kwargs)
self._kv_indexer = LMDBStorage(dump_path=dump_path, *args, **kwargs)
@requests(on="/search")
def search(self, docs: "DocumentArray", parameters: Dict = None, **kwargs):
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
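        # append 'm' so the KV lookup traverses the matches that the vector search just attached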
kv_parameters["traversal_paths"] = [
path + "m" for path in kv_parameters.get("traversal_paths", ["r"])
]
self._kv_indexer.search(docs, parameters=kv_parameters)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
Runner.schedulers = [scheduler]
Hook.after_train_iter(Runner)
scheduler.step.assert_called()
def test_after_epoch(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
Runner.schedulers = [scheduler]
Hook.after_train_epoch(Runner)
scheduler.step.assert_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
Runner.schedulers = [scheduler]
Hook.after_iter(Runner)
scheduler.step.assert_called()
def test_after_epoch(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
Runner.schedulers = [scheduler]
Hook.after_epoch(Runner)
scheduler.step.assert_called()
|
import argparse
import jsonlines
from pycocotools.coco import COCO
from tqdm import tqdm
def _has_only_empty_bbox(anno):
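    # bbox is [x, y, w, h]; an annotation counts as empty when its width or height is at most 1 pixel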
return all(any(o <= 1 for o in obj['bbox'][2:]) for obj in anno)
def has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
return True
def goldg2odvg(args):
coco = COCO(args.input)
ids = list(sorted(coco.imgs.keys()))
out_results = []
for img_id in tqdm(ids):
if isinstance(img_id, str):
ann_ids = coco.getAnnIds(imgIds=[img_id], iscrowd=0)
else:
ann_ids = coco.getAnnIds(imgIds=img_id, iscrowd=0)
annos = coco.loadAnns(ann_ids)
if not has_valid_annotation(annos):
continue
img_info = coco.loadImgs(img_id)[0]
file_name = img_info['file_name']
caption = img_info['caption']
regions = {}
for anno in annos:
box = anno['bbox']
tokens_positive = anno['tokens_positive']
x1, y1, w, h = box
inter_w = max(0, min(x1 + w, int(img_info['width'])) - max(x1, 0))
inter_h = max(0, min(y1 + h, int(img_info['height'])) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if anno['area'] <= 0 or w < 1 or h < 1:
continue
if anno.get('iscrowd', False):
continue
bbox_xyxy = [
x1, y1,
min(x1 + w, int(img_info['width'])),
min(y1 + h, int(img_info['height']))
]
tokens_positive = sorted(tokens_positive, key=lambda x: x[0])
phrase = []
pre_end_index = -10
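            # merge token spans that are contiguous in the caption into a single phrase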
for token in tokens_positive:
start_index = token[0]
end_index = token[1]
if pre_end_index + 1 == start_index:
if caption[token[0] - 1] == ' ':
phrase[
-1] = phrase[-1] + ' ' + caption[token[0]:token[1]]
else:
phrase.append(caption[token[0]:token[1]])
else:
phrase.append(caption[token[0]:token[1]])
pre_end_index = end_index
key = ' '.join(phrase)
if key not in regions:
regions[key] = {
'bbox': bbox_xyxy,
'phrase': phrase,
'tokens_positive': tokens_positive
}
else:
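                    # the same phrase was seen before: collect its boxes into a list under one region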
old_box = regions[key]['bbox']
if isinstance(old_box[0], list):
old_box.append(bbox_xyxy)
else:
old_box = [old_box, bbox_xyxy]
regions[key]['bbox'] = old_box
out_dict = {
'filename': file_name,
'height': int(img_info['height']),
'width': int(img_info['width']),
'grounding': {
'caption': caption
}
}
region_list = []
for key, value in regions.items():
phrase = value['phrase']
if len(phrase) == 1:
phrase = phrase[0]
region_list.append({
'bbox': value['bbox'],
'phrase': phrase,
'tokens_positive': value['tokens_positive']
})
out_dict['grounding']['regions'] = region_list
out_results.append(out_dict)
if args.out_ann is None:
out_path = args.input[:-5] + '_vg.json'
else:
out_path = args.out_ann
with jsonlines.open(out_path, mode='w') as writer:
writer.write_all(out_results)
print(f'save to {out_path}')
# goldg+: final_mixed_train_no_coco.json +
# final_flickr_separateGT_train.json +
# final_mixed_train_only_coco.json
if __name__ == '__main__':
parser = argparse.ArgumentParser('goldg to odvg format.', add_help=True)
parser.add_argument('input', type=str, help='input json file name')
parser.add_argument('--out-ann', '-o', type=str)
args = parser.parse_args()
goldg2odvg(args)
|
import argparse
import jsonlines
from pycocotools.coco import COCO
from tqdm import tqdm
def _has_only_empty_bbox(anno):
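    # bbox is [x, y, w, h]; an annotation counts as empty when its width or height is at most 1 pixel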
return all(any(o <= 1 for o in obj['bbox'][2:]) for obj in anno)
def has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
return True
def goldg2odvg(args):
coco = COCO(args.input)
ids = list(sorted(coco.imgs.keys()))
out_results = []
for img_id in tqdm(ids):
if isinstance(img_id, str):
ann_ids = coco.getAnnIds(imgIds=[img_id], iscrowd=0)
else:
ann_ids = coco.getAnnIds(imgIds=img_id, iscrowd=0)
annos = coco.loadAnns(ann_ids)
if not has_valid_annotation(annos):
continue
img_info = coco.loadImgs(img_id)[0]
file_name = img_info['file_name']
caption = img_info['caption']
regions = {}
for anno in annos:
box = anno['bbox']
tokens_positive = anno['tokens_positive']
x1, y1, w, h = box
inter_w = max(0, min(x1 + w, int(img_info['width'])) - max(x1, 0))
inter_h = max(0, min(y1 + h, int(img_info['height'])) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if anno['area'] <= 0 or w < 1 or h < 1:
continue
if anno.get('iscrowd', False):
continue
bbox_xyxy = [
x1, y1,
min(x1 + w, int(img_info['width'])),
min(y1 + h, int(img_info['height']))
]
tokens_positive = sorted(tokens_positive, key=lambda x: x[0])
phrase = []
pre_end_index = -10
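            # merge token spans that are contiguous in the caption into a single phrase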
for token in tokens_positive:
start_index = token[0]
end_index = token[1]
if pre_end_index + 1 == start_index:
if caption[token[0] - 1] == ' ':
phrase[
-1] = phrase[-1] + ' ' + caption[token[0]:token[1]]
else:
phrase.append(caption[token[0]:token[1]])
else:
phrase.append(caption[token[0]:token[1]])
pre_end_index = end_index
key = ' '.join(phrase)
if key not in regions:
regions[key] = {
'bbox': bbox_xyxy,
'phrase': phrase,
'tokens_positive': tokens_positive
}
else:
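                    # the same phrase was seen before: collect its boxes into a list under one region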
old_box = regions[key]['bbox']
if isinstance(old_box[0], list):
old_box.append(bbox_xyxy)
else:
old_box = [old_box, bbox_xyxy]
regions[key]['bbox'] = old_box
out_dict = {
'filename': file_name,
'height': int(img_info['height']),
'width': int(img_info['width']),
'grounding': {
'caption': caption
}
}
region_list = []
for key, value in regions.items():
phrase = value['phrase']
if len(phrase) == 1:
phrase = phrase[0]
region_list.append({
'bbox': value['bbox'],
'phrase': phrase,
'tokens_positive': value['tokens_positive']
})
out_dict['grounding']['regions'] = region_list
out_results.append(out_dict)
if args.out_ann is None:
out_path = args.input[:-5] + '_vg.json'
else:
out_path = args.out_ann
with jsonlines.open(out_path, mode='w') as writer:
writer.write_all(out_results)
print(f'save to {out_path}')
# goldg+: final_mixed_train_no_coco.json +
# final_flickr_separateGT_train.json +
# final_mixed_train_only_coco.json
if __name__ == '__main__':
parser = argparse.ArgumentParser('goldg to odvg format.', add_help=True)
parser.add_argument('input', type=str, help='input list name')
parser.add_argument('--out-ann', '-o', type=str)
args = parser.parse_args()
goldg2odvg(args)
|
from typing import Dict, Optional, TypeVar
from google.protobuf import json_format
from jina.excepts import BadRequestType
from jina.helper import typename
from jina.proto import jina_pb2
from jina.types.mixin import ProtoTypeMixin
StatusSourceType = TypeVar('StatusSourceType', jina_pb2.StatusProto, str, Dict, bytes)
class StatusMessage(ProtoTypeMixin):
"""Represents a Status message used for health check of the Flow"""
def __init__(
self,
status_object: Optional[StatusSourceType] = None,
):
self._pb_body = jina_pb2.StatusProto()
try:
if isinstance(status_object, jina_pb2.StatusProto):
self._pb_body = status_object
elif isinstance(status_object, dict):
json_format.ParseDict(status_object, self._pb_body)
elif isinstance(status_object, str):
json_format.Parse(status_object, self._pb_body)
elif isinstance(status_object, bytes):
self._pb_body.ParseFromString(status_object)
elif status_object is not None:
# note ``None`` is not considered as a bad type
raise ValueError(f'{typename(status_object)} is not recognizable')
else:
self._pb_body = jina_pb2.StatusProto()
except Exception as ex:
raise BadRequestType(
f'fail to construct a {self.__class__} object from {status_object}'
) from ex
def set_exception(self, ex: Exception):
"""Set exception information into the Status Message
:param ex: The Exception to be filled
"""
import traceback
self.proto.code = jina_pb2.StatusProto.ERROR
self.proto.description = repr(ex)
self.proto.exception.name = ex.__class__.__name__
self.proto.exception.args.extend([str(v) for v in ex.args])
self.proto.exception.stacks.extend(
traceback.format_exception(type(ex), value=ex, tb=ex.__traceback__)
)
def set_code(self, code):
"""Set the code of the Status Message
:param code: The code to be added
"""
self.proto.code = code
|
from typing import Dict, Optional, TypeVar
from google.protobuf import json_format
from jina.excepts import BadRequestType
from jina.helper import typename
from jina.proto import jina_pb2
from jina.types.mixin import ProtoTypeMixin
StatusSourceType = TypeVar('StatusSourceType', jina_pb2.StatusProto, str, Dict, bytes)
class StatusMessage(ProtoTypeMixin):
"""Represents a Status message used for health check of the Flow"""
def __init__(
self,
status_object: Optional[StatusSourceType] = None,
):
self._pb_body = jina_pb2.StatusProto()
try:
if isinstance(status_object, jina_pb2.StatusProto):
self._pb_body = status_object
elif isinstance(status_object, dict):
json_format.ParseDict(status_object, self._pb_body)
elif isinstance(status_object, str):
json_format.Parse(status_object, self._pb_body)
elif isinstance(status_object, bytes):
self._pb_body.ParseFromString(status_object)
elif status_object is not None:
# note ``None`` is not considered as a bad type
raise ValueError(f'{typename(status_object)} is not recognizable')
else:
self._pb_body = jina_pb2.StatusProto()
except Exception as ex:
raise BadRequestType(
f'fail to construct a {self.__class__} object from {status_object}'
) from ex
def set_exception(self, ex: Exception):
"""Set exception information into the Status Message
:param ex: The Exception to be filled
"""
import traceback
self.proto.code = jina_pb2.StatusProto.ERROR
self.proto.description = repr(ex)
self.proto.exception.name = ex.__class__.__name__
self.proto.exception.args.extend([str(v) for v in ex.args])
self.proto.exception.stacks.extend(
traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__)
)
def set_code(self, code):
"""Set the code of the Status Message
:param code: The code to be added
"""
self.proto.code = code
|
from typing import Any
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
def test_parse() -> None:
"""Test parsing structured output."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
# Test valid JSON input
text = '```json\n{"name": "John", "age": 30}\n```'
expected_result = {"name": "John", "age": 30}
result = parser.parse(text)
assert result == expected_result, f"Expected {expected_result}, but got {result}"
# Test invalid JSON input
text = '```json\n{"name": "John"}\n```'
try:
parser.parse(text)
except OutputParserException:
pass # Test passes if OutputParserException is raised
else:
msg = f"Expected OutputParserException, but got {parser.parse(text)}"
raise AssertionError(msg)
def test_output_type() -> None:
"""Test the output type of the structured output parser is Dict[str, Any]."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
assert parser.OutputType == dict[str, Any]
|
from typing import Any
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
def test_parse() -> None:
"""Test parsing structured output."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
# Test valid JSON input
text = '```json\n{"name": "John", "age": 30}\n```'
expected_result = {"name": "John", "age": 30}
result = parser.parse(text)
assert result == expected_result, f"Expected {expected_result}, but got {result}"
# Test invalid JSON input
text = '```json\n{"name": "John"}\n```'
try:
parser.parse(text)
except OutputParserException:
pass # Test passes if OutputParserException is raised
else:
assert False, f"Expected OutputParserException, but got {parser.parse(text)}"
def test_output_type() -> None:
"""Test the output type of the structured output parser is Dict[str, Any]."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
assert parser.OutputType == dict[str, Any]
|
import random
import asyncio
import time
import aiohttp
import grpc
def _raise_last_attempt(err, attempt):
if isinstance(err, asyncio.CancelledError):
trailing_metadata = grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=grpc.StatusCode.CANCELLED,
initial_metadata=grpc.aio.Metadata(),
trailing_metadata=trailing_metadata,
)
elif isinstance(err, grpc.aio.AioRpcError):
trailing_metadata = err.trailing_metadata() or grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=err.code(),
details=err.details(),
initial_metadata=err.initial_metadata(),
trailing_metadata=trailing_metadata,
debug_error_string=err.debug_error_string(),
)
elif isinstance(err, aiohttp.ClientConnectorCertificateError):
raise err
elif isinstance(err, aiohttp.ClientError):
raise ConnectionError(str(err))
else:
raise err
def sync_wait_or_raise_err(
attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
    Accepts retry parameters and the underlying error. The error is raised if max_attempts has been reached; otherwise the
    method waits based on the backoff calculations.
:param attempt: Number of the current attempt.
:param err: Underlying error that was raised by the operation.
:param max_attempts: Maximum number of attempts that are allowed.
:param backoff_multiplier: Factor that will be raised to the exponent of (attempt - 1) for calculating the backoff wait time.
:param initial_backoff: The backoff time on the first error. This will be multiplied by the backoff_multiplier exponent for subsequent wait time calculations.
:param max_backoff: The maximum backoff wait time.
"""
if attempt == max_attempts:
_raise_last_attempt(err, attempt)
else:
time.sleep(
_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff)
)
async def wait_or_raise_err(
attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
    Accepts retry parameters and the underlying error. The error is raised if max_attempts has been reached; otherwise the
    method waits based on the backoff calculations.
:param attempt: Number of the current attempt.
:param err: Underlying error that was raised by the operation.
:param max_attempts: Maximum number of attempts that are allowed.
:param backoff_multiplier: Factor that will be raised to the exponent of (attempt - 1) for calculating the backoff wait time.
:param initial_backoff: The backoff time on the first error. This will be multiplied by the backoff_multiplier exponent for subsequent wait time calculations.
:param max_backoff: The maximum backoff wait time.
"""
if attempt == max_attempts:
_raise_last_attempt(err, attempt)
else:
await asyncio.sleep(
_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff)
)
def _wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff):
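    # full-jitter exponential backoff: the first retry waits initial_backoff, later retries wait a
    # random time in [0, min(initial_backoff * backoff_multiplier ** (attempt - 1), max_backoff)]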
if attempt == 1:
wait_time = initial_backoff
else:
wait_time = random.uniform(
0,
min(initial_backoff * backoff_multiplier ** (attempt - 1), max_backoff),
)
return wait_time
|
import random
import asyncio
import time
import aiohttp
import grpc
def _raise_last_attempt(err, attempt):
if isinstance(err, asyncio.CancelledError):
trailing_metadata = grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=grpc.StatusCode.CANCELLED,
initial_metadata=grpc.aio.Metadata(),
trailing_metadata=trailing_metadata,
)
elif isinstance(err, grpc.aio.AioRpcError):
trailing_metadata = err.trailing_metadata() or grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=err.code(),
details=err.details(),
initial_metadata=err.initial_metadata(),
trailing_metadata=trailing_metadata,
debug_error_string=err.debug_error_string(),
)
elif isinstance(err, aiohttp.ClientConnectorCertificateError):
raise err
elif isinstance(err, aiohttp.ClientError):
raise ConnectionError(str(err))
else:
raise err
def sync_wait_or_raise_err(attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
    Accepts retry parameters and the underlying error. The error is raised if max_attempts has been reached; otherwise the
    method waits based on the backoff calculations.
:param attempt: Number of the current attempt.
:param err: Underlying error that was raised by the operation.
:param max_attempts: Maximum number of attempts that are allowed.
:param backoff_multiplier: Factor that will be raised to the exponent of (attempt - 1) for calculating the backoff wait time.
:param initial_backoff: The backoff time on the first error. This will be multiplied by the backoff_multiplier exponent for subsequent wait time calculations.
:param max_backoff: The maximum backoff wait time.
"""
if attempt == max_attempts:
_raise_last_attempt(err, attempt)
else:
time.sleep(_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff))
async def wait_or_raise_err(
attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
    Accepts retry parameters and the underlying error. The error is raised if max_attempts has been reached; otherwise the
    method waits based on the backoff calculations.
:param attempt: Number of the current attempt.
:param err: Underlying error that was raised by the operation.
:param max_attempts: Maximum number of attempts that are allowed.
:param backoff_multiplier: Factor that will be raised to the exponent of (attempt - 1) for calculating the backoff wait time.
:param initial_backoff: The backoff time on the first error. This will be multiplied by the backoff_multiplier exponent for subsequent wait time calculations.
:param max_backoff: The maximum backoff wait time.
"""
if attempt == max_attempts:
_raise_last_attempt(err, attempt)
else:
await asyncio.sleep(_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff))
def _wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff):
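    # full-jitter exponential backoff: the first retry waits initial_backoff, later retries wait a
    # random time in [0, min(initial_backoff * backoff_multiplier ** (attempt - 1), max_backoff)]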
if attempt == 1:
wait_time = initial_backoff
else:
wait_time = random.uniform(
0,
min(initial_backoff * backoff_multiplier ** (attempt - 1), max_backoff),
)
return wait_time
|
import argparse
import json
import logging
import os
import tarfile
from functools import partial
from multiprocessing import Pool
def create_logger(output_file):
logger = logging.getLogger('grit_logger')
logger.setLevel(logging.INFO) # set logger output level
formatter = logging.Formatter('%(asctime)s - %(message)s')
fh = logging.FileHandler(output_file)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(fh)
logger.addHandler(console)
return logger
def count_download_image(download_json_dir, logger):
parquet_files = [
f for f in os.listdir(download_json_dir) if f.endswith('.json')
]
    total = 0
    for file in parquet_files:
        with open(os.path.join(download_json_dir, file), 'r') as f:
            data = json.load(f)
            total = total + int(data['successes'])
            logger.info(file + ' has ' + str(data['successes']) +
                        ' successful images')
    logger.info('all files finished, ' + str(total) +
                ' images have been successfully downloaded.')
def tar_processing(tar_path, output_dir, logger):
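    # decompress one tar shard, merge its JSON annotations into a single file, then move the images to the output dir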
filepath = untar(tar_path, logger)
json_files = [f for f in os.listdir(filepath) if f.endswith('.json')]
all_data = []
cnt = 0
for file in json_files:
with open(os.path.join(filepath, file), 'r') as f:
df = json.load(f)
cnt = cnt + 1
all_data.extend([df])
dir_name = os.path.basename(filepath)
# write all data to a json file
logger.info(f'{dir_name} has {cnt} jsons')
json_name = os.path.basename(filepath) + '.json'
if not os.path.exists(os.path.join(output_dir, 'annotations')):
os.mkdir(os.path.join(output_dir, 'annotations'))
with open(os.path.join(output_dir, 'annotations', json_name), 'w') as f:
json.dump(all_data, f)
logger.info(f'{dir_name} completed')
cp_rm(filepath, output_dir)
return os.path.basename(filepath)
def untar(filepath, logger):
if tarfile.is_tarfile(filepath):
new_folder = os.path.splitext(filepath)[0]
tar_name = os.path.basename(filepath)
with tarfile.open(filepath) as tar:
members = tar.getmembers()
if not os.path.exists(new_folder):
os.mkdir(new_folder)
else:
f = os.listdir(new_folder)
if len(members) == len(f):
logger.info(f'{tar_name} already decompressed')
return new_folder
logger.info(f'{tar_name} decompressing...')
os.system(f'tar -xf {filepath} -C {new_folder}')
logger.info(f'{tar_name} decompressed!')
return new_folder
def cp_rm(filepath, output_dir):
# delete txt/json
for file in os.listdir(filepath):
if file.endswith('.txt') or file.endswith('.json'):
os.remove(os.path.join(filepath, file))
# move images to output dir
target_dir = os.path.join(output_dir, 'images')
if not os.path.exists(os.path.join(output_dir, 'images')):
os.mkdir(os.path.join(output_dir, 'images'))
os.system('mv -f {} {}'.format(filepath, target_dir))
def main(args):
logger = create_logger(args.log_name)
all_file_name = [
os.path.join(args.image_dir, file)
for file in os.listdir(args.image_dir) if file.endswith('.tar')
]
all_file_name.sort()
func = partial(tar_processing, output_dir=args.output_dir, logger=logger)
with Pool(processes=args.num_process) as pool:
result = pool.imap(func=func, iterable=all_file_name) # noqa
# print(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('image_dir', type=str) # grit raw directory
parser.add_argument('output_dir', type=str)
    parser.add_argument('--num-process', type=int, default=10)
parser.add_argument('--log-name', type=str, default='grit_processing.log')
args = parser.parse_args()
main(args)
|
import argparse
import json
import logging
import os
import tarfile
from functools import partial
from multiprocessing import Pool
def create_logger(output_file):
logger = logging.getLogger('grit_logger')
logger.setLevel(logging.INFO) # set logger output level
formatter = logging.Formatter('%(asctime)s - %(message)s')
fh = logging.FileHandler(output_file)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(fh)
logger.addHandler(console)
return logger
def count_download_image(download_json_dir, logger):
parquet_files = [
f for f in os.listdir(download_json_dir) if f.endswith('.json')
]
    total = 0
    for file in parquet_files:
        with open(os.path.join(download_json_dir, file), 'r') as f:
            data = json.load(f)
            total = total + int(data['successes'])
            logger.info(file + ' has ' + str(data['successes']) +
                        ' successful images')
    logger.info('all files finished, ' + str(total) +
                ' images have been successfully downloaded.')
def tar_processing(tar_path, output_dir, logger):
"""解压tar文件到对应名字的文件夹,并提取所有的json combine后,删除其他保存图片."""
# 创建文件夹并解压
filepath = untar(tar_path, logger)
'''将所有json融合为一个json'''
# 获取解压后目录下所有的.json文件
json_files = [f for f in os.listdir(filepath) if f.endswith('.json')]
# 初始化一个空的列表来存储所有的数据
all_data = []
cnt = 0
for file in json_files:
with open(os.path.join(filepath, file), 'r') as f:
df = json.load(f)
cnt = cnt + 1
            # convert the loaded DataFrame to .json and append it to the all_data list
all_data.extend([df])
dir_name = os.path.basename(filepath)
# write all data to a json file
logger.info(f'{dir_name} has {cnt} jsons')
json_name = os.path.basename(filepath) + '.json'
if not os.path.exists(os.path.join(output_dir, 'annotations')):
os.mkdir(os.path.join(output_dir, 'annotations'))
with open(os.path.join(output_dir, 'annotations', json_name), 'w') as f:
json.dump(all_data, f)
logger.info(f'{dir_name} completed')
cp_rm(filepath, output_dir)
return os.path.basename(filepath)
def untar(filepath, logger):
    # if the file is a tar file, decompress it
if tarfile.is_tarfile(filepath):
        # create a new folder named after the tar file, without the extension
new_folder = os.path.splitext(filepath)[0]
tar_name = os.path.basename(filepath)
with tarfile.open(filepath) as tar:
            # get all the members of the tar file
members = tar.getmembers()
if not os.path.exists(new_folder):
os.mkdir(new_folder)
else:
f = os.listdir(new_folder)
            # decompress the tar file into the new folder
if len(members) == len(f):
logger.info(f'{tar_name} already decompressed')
return new_folder
logger.info(f'{tar_name} decompressing...')
os.system(f'tar -xf {filepath} -C {new_folder}')
logger.info(f'{tar_name} decompressed!')
return new_folder
def cp_rm(filepath, output_dir):
# delete txt/json
for file in os.listdir(filepath):
if file.endswith('.txt') or file.endswith('.json'):
os.remove(os.path.join(filepath, file))
# move images to output dir
target_dir = os.path.join(output_dir, 'images')
if not os.path.exists(os.path.join(output_dir, 'images')):
os.mkdir(os.path.join(output_dir, 'images'))
os.system('mv -f {} {}'.format(filepath, target_dir))
parser = argparse.ArgumentParser()
# parser.add_argument('-d', '--download_json_dir', type=str, default=None)
parser.add_argument('image_dir', type=str) # grit raw directory
parser.add_argument('output_dir', type=str) # processed grit output dir
parser.add_argument('--log_name', type=str, default='grit_processing.log')
args = parser.parse_args()
def main(args):
logger = create_logger(args.log_name)
# if args.download_json_dir != None:
# count_download_image(args.download_json_dir, logger)
if args.image_dir is not None:
all_file_name = [
os.path.join(args.image_dir, file)
for file in os.listdir(args.image_dir) if file.endswith('.tar')
]
all_file_name.sort()
func = partial(
tar_processing, output_dir=args.output_dir, logger=logger)
with Pool(processes=10) as pool:
result = pool.imap(func=func, iterable=all_file_name)
for r in result:
print(result)
if __name__ == '__main__':
main(args)
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocArray[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocArray[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocArray.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocArray[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
|
import os
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDocument):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocumentArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocumentArray[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocumentArray[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocumentArray.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocumentArray[nested_doc.__class__].from_csv(
file_path=str(TOYDATA_DIR / 'docs.csv')
)
|
# mypy: allow-untyped-defs
import torch._C._lazy
def reset():
"""Resets all metric counters."""
torch._C._lazy._reset_metrics()
def counter_names():
"""Retrieves all the currently active counter names."""
return torch._C._lazy._counter_names()
def counter_value(name: str):
"""Return the value of the counter with the specified name"""
return torch._C._lazy._counter_value(name)
def metrics_report():
"""Return the combined (lazy core and backend) metric report"""
return torch._C._lazy._metrics_report()
|
# mypy: allow-untyped-defs
import torch._C._lazy
def reset():
"""Resets all metric counters."""
torch._C._lazy._reset_metrics()
def counter_names():
"""Retrieves all the currently active counter names."""
return torch._C._lazy._counter_names()
def counter_value(name: str):
"""Return the value of the counter with the speficied name"""
return torch._C._lazy._counter_value(name)
def metrics_report():
"""Return the combined (lazy core and backend) metric report"""
return torch._C._lazy._metrics_report()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .ddq_detr_layers import DDQTransformerDecoder
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .grounding_dino_layers import (GroundingDinoTransformerDecoder,
GroundingDinoTransformerDecoderLayer,
GroundingDinoTransformerEncoder)
from .mask2former_layers import (Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder)
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'DDQTransformerDecoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'GroundingDinoTransformerDecoderLayer', 'GroundingDinoTransformerEncoder',
'GroundingDinoTransformerDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .mask2former_layers import (Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder)
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype.datapoints._datapoint import Datapoint
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
D = TypeVar("D", bound="EncodedData")
class EncodedData(Datapoint):
@classmethod
def _wrap(cls: Type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: Type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> Tuple[int, int]:
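        # lazily decode just enough of the image via PIL to get (height, width); cached after the first access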
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
|
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype.features._feature import _Feature
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
D = TypeVar("D", bound="EncodedData")
class EncodedData(_Feature):
@classmethod
def _wrap(cls: Type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: Type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> Tuple[int, int]:
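        # lazily decode just enough of the image via PIL to get (height, width); cached after the first access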
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.prototype.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""torchaudio.prototype.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained *ConvTasNet* [:footcite:`Luo_2019`] pipeline for source separation.
The underlying model is constructed by :py:func:`torchaudio.prototype.models.conv_tasnet_base`
and utilizes weights trained on *Libri2Mix dataset* [:footcite:`cosentino2020librimix`] using training script
``lightning_train.py`` `here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation. The underlying model is constructed by
:py:func:`torchaudio.prototype.models.hdemucs_high` and utilizes weights trained on MUSDB-HQ [:footcite:`MUSDB18HQ`]
and internal extra training data, all at the same sample rate of 44.1 kHz. The model separates mixture music into
“drums”, “bass”, “vocals”, and “other” sources. Training was performed in the original HDemucs repository
`here <https://github.com/facebookresearch/demucs/>`__.
"""
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.prototype.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""torchaudio.prototype.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained *ConvTasNet* [:footcite:`Luo_2019`] pipeline for source separation.
The underlying model is constructed by :py:func:`torchaudio.prototype.models.conv_tasnet_base`
and utilizes weights trained on *Libri2Mix dataset* [:footcite:`cosentino2020librimix`] using training script
``lightning_train.py`` `here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"], sample_rate=44100),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation. The underlying model is constructed by
:py:func:`torchaudio.prototype.models.hdemucs_high` and utilizes weights trained on MUSDB-HQ [:footcite:`MUSDB18HQ`]
and internal extra training data, all at the same sample rate of 44.1 kHz. The model separates mixture music into
“drums”, “bass”, “vocals”, and “other” sources. Training was performed in the original HDemucs repository
`here <https://github.com/facebookresearch/demucs/>`__.
"""
|
import tempfile
import unittest
import numpy as np
import pytest
import torch
from diffusers import DiffusionPipeline
from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
from diffusers.utils.testing_utils import torch_device
class AttnAddedKVProcessorTests(unittest.TestCase):
def get_constructor_arguments(self, only_cross_attention: bool = False):
query_dim = 10
if only_cross_attention:
cross_attention_dim = 12
else:
# when only cross attention is not set, the cross attention dim must be the same as the query dim
cross_attention_dim = query_dim
return {
"query_dim": query_dim,
"cross_attention_dim": cross_attention_dim,
"heads": 2,
"dim_head": 4,
"added_kv_proj_dim": 6,
"norm_num_groups": 1,
"only_cross_attention": only_cross_attention,
"processor": AttnAddedKVProcessor(),
}
def get_forward_arguments(self, query_dim, added_kv_proj_dim):
batch_size = 2
hidden_states = torch.rand(batch_size, query_dim, 3, 2)
encoder_hidden_states = torch.rand(batch_size, 4, added_kv_proj_dim)
attention_mask = None
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"attention_mask": attention_mask,
}
def test_only_cross_attention(self):
# self and cross attention
torch.manual_seed(0)
constructor_args = self.get_constructor_arguments(only_cross_attention=False)
attn = Attention(**constructor_args)
self.assertTrue(attn.to_k is not None)
self.assertTrue(attn.to_v is not None)
forward_args = self.get_forward_arguments(
query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"]
)
self_and_cross_attn_out = attn(**forward_args)
# only self attention
torch.manual_seed(0)
constructor_args = self.get_constructor_arguments(only_cross_attention=True)
attn = Attention(**constructor_args)
self.assertTrue(attn.to_k is None)
self.assertTrue(attn.to_v is None)
forward_args = self.get_forward_arguments(
query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"]
)
only_cross_attn_out = attn(**forward_args)
self.assertTrue((only_cross_attn_out != self_and_cross_attn_out).all())
class DeprecatedAttentionBlockTests(unittest.TestCase):
@pytest.fixture(scope="session")
def is_dist_enabled(pytestconfig):
return pytestconfig.getoption("dist") == "loadfile"
@pytest.mark.xfail(
condition=torch.device(torch_device).type == "cuda" and is_dist_enabled,
reason="Test currently fails on our GPU CI because of `loadfile`. Note that it only fails when the tests are distributed from `pytest ... tests/models`. If the tests are run individually, even with `loadfile` it won't fail.",
strict=True,
)
def test_conversion_when_using_device_map(self):
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
)
pre_conversion = pipe(
"foo",
num_inference_steps=2,
generator=torch.Generator("cpu").manual_seed(0),
output_type="np",
).images
# the initial conversion succeeds
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-torch", device_map="balanced", safety_checker=None
)
conversion = pipe(
"foo",
num_inference_steps=2,
generator=torch.Generator("cpu").manual_seed(0),
output_type="np",
).images
with tempfile.TemporaryDirectory() as tmpdir:
# save the converted model
pipe.save_pretrained(tmpdir)
# can also load the converted weights
pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="balanced", safety_checker=None)
after_conversion = pipe(
"foo",
num_inference_steps=2,
generator=torch.Generator("cpu").manual_seed(0),
output_type="np",
).images
self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-3))
self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-3))
|
import tempfile
import unittest
import numpy as np
import torch
from diffusers import DiffusionPipeline
from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
class AttnAddedKVProcessorTests(unittest.TestCase):
def get_constructor_arguments(self, only_cross_attention: bool = False):
query_dim = 10
if only_cross_attention:
cross_attention_dim = 12
else:
            # when `only_cross_attention` is not set, the cross attention dim must be the same as the query dim
cross_attention_dim = query_dim
return {
"query_dim": query_dim,
"cross_attention_dim": cross_attention_dim,
"heads": 2,
"dim_head": 4,
"added_kv_proj_dim": 6,
"norm_num_groups": 1,
"only_cross_attention": only_cross_attention,
"processor": AttnAddedKVProcessor(),
}
def get_forward_arguments(self, query_dim, added_kv_proj_dim):
batch_size = 2
hidden_states = torch.rand(batch_size, query_dim, 3, 2)
encoder_hidden_states = torch.rand(batch_size, 4, added_kv_proj_dim)
attention_mask = None
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"attention_mask": attention_mask,
}
def test_only_cross_attention(self):
# self and cross attention
torch.manual_seed(0)
constructor_args = self.get_constructor_arguments(only_cross_attention=False)
attn = Attention(**constructor_args)
self.assertTrue(attn.to_k is not None)
self.assertTrue(attn.to_v is not None)
forward_args = self.get_forward_arguments(
query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"]
)
self_and_cross_attn_out = attn(**forward_args)
# only self attention
torch.manual_seed(0)
constructor_args = self.get_constructor_arguments(only_cross_attention=True)
attn = Attention(**constructor_args)
self.assertTrue(attn.to_k is None)
self.assertTrue(attn.to_v is None)
forward_args = self.get_forward_arguments(
query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"]
)
only_cross_attn_out = attn(**forward_args)
self.assertTrue((only_cross_attn_out != self_and_cross_attn_out).all())
class DeprecatedAttentionBlockTests(unittest.TestCase):
def test_conversion_when_using_device_map(self):
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
)
pre_conversion = pipe(
"foo",
num_inference_steps=2,
generator=torch.Generator("cpu").manual_seed(0),
output_type="np",
).images
# the initial conversion succeeds
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-torch", device_map="balanced", safety_checker=None
)
conversion = pipe(
"foo",
num_inference_steps=2,
generator=torch.Generator("cpu").manual_seed(0),
output_type="np",
).images
with tempfile.TemporaryDirectory() as tmpdir:
# save the converted model
pipe.save_pretrained(tmpdir)
# can also load the converted weights
pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="balanced", safety_checker=None)
after_conversion = pipe(
"foo",
num_inference_steps=2,
generator=torch.Generator("cpu").manual_seed(0),
output_type="np",
).images
self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-3))
self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-3))
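# --- Hedged usage sketch (not part of the original test file) ---
# A condensed, standalone version of what test_only_cross_attention exercises:
# with only_cross_attention=True and added_kv_proj_dim set, Attention drops its
# self-attention key/value projections and attends only over the added K/V
# projections of encoder_hidden_states. Constructor values mirror the test above.
if __name__ == "__main__":
    torch.manual_seed(0)
    attn = Attention(
        query_dim=10,
        cross_attention_dim=12,
        heads=2,
        dim_head=4,
        added_kv_proj_dim=6,
        norm_num_groups=1,
        only_cross_attention=True,
        processor=AttnAddedKVProcessor(),
    )
    assert attn.to_k is None and attn.to_v is None  # no self-attention K/V projections
    out = attn(
        hidden_states=torch.rand(2, 10, 3, 2),  # (batch, channels, height, width)
        encoder_hidden_states=torch.rand(2, 4, 6),  # last dim == added_kv_proj_dim
    )
    print(out.shape)  # matches the hidden_states shape: torch.Size([2, 10, 3, 2])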
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"cuda"',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
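# --- Hedged sketch (not part of the original test file) ---
# Direct use of the encoder, without a Flow or Docker. It assumes
# AudioCLIPTextEncoder follows the usual Jina executor convention of an
# `encode(docs, parameters, **kwargs)` request method; check the executor
# source if the signature differs.
def _direct_encode_smoke_test():
    encoder = AudioCLIPTextEncoder()  # loads the AudioCLIP text model
    docs = DocumentArray([Document(text='a single sentence to embed')])
    encoder.encode(docs, parameters={})
    assert docs[0].embedding.shape == (_EMBEDDING_DIM,)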
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.mimetypes import MESH_EXTRA_EXTENSIONS
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a file containing 3D mesh information.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return MESH_EXTRA_EXTENSIONS
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a [`VerticesAndFaces`][docarray.documents.VerticesAndFaces]
object containing vertices and faces information.
---
```python
from docarray import BaseDoc
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDoc):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
```
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
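# --- Hedged sketch (not part of the module above) ---
# extra_extensions() lets URL validation accept mesh formats that the stdlib
# `mimetypes` module cannot identify on its own; the exact list comes from
# MESH_EXTRA_EXTENSIONS. The .obj URL below is the one from the docstring example.
if __name__ == '__main__':
    print(Mesh3DUrl.extra_extensions())
    url = parse_obj_as(Mesh3DUrl, 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
    print(url)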
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a file containing 3D mesh information.
    Can be a remote (web) URL, or a local file path.
"""
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a [`VerticesAndFaces`][docarray.documents.VerticesAndFaces]
object containing vertices and faces information.
---
```python
from docarray import BaseDoc
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDoc):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
```
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
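# --- Hedged usage sketch (not part of the module above) ---
# Loading with non-default options. The .obj URL is the one from the docstring
# example; `process=False` is a standard trimesh.load() option (it disables
# trimesh's automatic mesh processing) and is forwarded via trimesh_args.
if __name__ == '__main__':
    from docarray import BaseDoc

    class MyMesh(BaseDoc):
        url: Mesh3DUrl

    doc = MyMesh(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
    tensors = doc.url.load(skip_materials=False, trimesh_args={'process': False})
    print(tensors.vertices.shape, tensors.faces.shape)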
|