input | output
---|---
import json
from typing import List
import requests
from langchain_core.documents import Document
from langchain_core.utils import secret_from_env
from pydantic import BaseModel, Field, SecretStr
class BraveSearchWrapper(BaseModel):
"""Wrapper around the Brave search engine."""
api_key: SecretStr = Field(
default_factory=secret_from_env(["BRAVE_SEARCH_API_KEY"])
)
"""The API key to use for the Brave search engine."""
search_kwargs: dict = Field(default_factory=dict)
"""Additional keyword arguments to pass to the search request."""
base_url: str = "https://api.search.brave.com/res/v1/web/search"
"""The base URL for the Brave search engine."""
def run(self, query: str) -> str:
"""Query the Brave search engine and return the results as a JSON string.
Args:
query: The query to search for.
Returns: The results as a JSON string.
"""
web_search_results = self._search_request(query=query)
final_results = [
{
"title": item.get("title"),
"link": item.get("url"),
"snippet": " ".join(
filter(
None, [item.get("description"), *item.get("extra_snippets", [])]
)
),
}
for item in web_search_results
]
return json.dumps(final_results)
def download_documents(self, query: str) -> List[Document]:
"""Query the Brave search engine and return the results as a list of Documents.
Args:
query: The query to search for.
Returns: The results as a list of Documents.
"""
results = self._search_request(query)
return [
Document(
page_content=" ".join(
filter(
None, [item.get("description"), *item.get("extra_snippets", [])]
)
),
metadata={"title": item.get("title"), "link": item.get("url")},
)
for item in results
]
def _search_request(self, query: str) -> List[dict]:
headers = {
"X-Subscription-Token": self.api_key.get_secret_value(),
"Accept": "application/json",
}
req = requests.PreparedRequest()
params = {**self.search_kwargs, **{"q": query, "extra_snippets": True}}
req.prepare_url(self.base_url, params)
if req.url is None:
raise ValueError("prepared url is None, this should not happen")
response = requests.get(req.url, headers=headers)
if not response.ok:
raise Exception(f"HTTP error {response.status_code}")
return response.json().get("web", {}).get("results", [])
|
import json
from typing import List
import requests
from langchain_core.documents import Document
from pydantic import BaseModel, Field
class BraveSearchWrapper(BaseModel):
"""Wrapper around the Brave search engine."""
api_key: str
"""The API key to use for the Brave search engine."""
search_kwargs: dict = Field(default_factory=dict)
"""Additional keyword arguments to pass to the search request."""
base_url: str = "https://api.search.brave.com/res/v1/web/search"
"""The base URL for the Brave search engine."""
def run(self, query: str) -> str:
"""Query the Brave search engine and return the results as a JSON string.
Args:
query: The query to search for.
Returns: The results as a JSON string.
"""
web_search_results = self._search_request(query=query)
final_results = [
{
"title": item.get("title"),
"link": item.get("url"),
"snippet": " ".join(
filter(
None, [item.get("description"), *item.get("extra_snippets", [])]
)
),
}
for item in web_search_results
]
return json.dumps(final_results)
def download_documents(self, query: str) -> List[Document]:
"""Query the Brave search engine and return the results as a list of Documents.
Args:
query: The query to search for.
Returns: The results as a list of Documents.
"""
results = self._search_request(query)
return [
Document(
page_content=" ".join(
filter(
None, [item.get("description"), *item.get("extra_snippets", [])]
)
),
metadata={"title": item.get("title"), "link": item.get("url")},
)
for item in results
]
def _search_request(self, query: str) -> List[dict]:
headers = {
"X-Subscription-Token": self.api_key,
"Accept": "application/json",
}
req = requests.PreparedRequest()
params = {**self.search_kwargs, **{"q": query, "extra_snippets": True}}
req.prepare_url(self.base_url, params)
if req.url is None:
raise ValueError("prepared url is None, this should not happen")
response = requests.get(req.url, headers=headers)
if not response.ok:
raise Exception(f"HTTP error {response.status_code}")
return response.json().get("web", {}).get("results", [])
|
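# --- Usage sketch for the wrappers above (illustrative, not part of the
# dataset pair): the token and query are placeholders, and "count" is a
# standard Brave Search API parameter limiting the number of results.
import json
wrapper = BraveSearchWrapper(api_key="<BRAVE_TOKEN>", search_kwargs={"count": 5})
results = json.loads(wrapper.run("open source vector databases"))
for item in results:
    print(item["title"], "->", item["link"])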
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Callable, Optional, Union
from torch.testing import assert_allclose as _assert_allclose
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
def assert_allclose(
actual: Any,
expected: Any,
rtol: Optional[float] = None,
atol: Optional[float] = None,
equal_nan: bool = True,
msg: Optional[Union[str, Callable]] = '',
) -> None:
"""Asserts that ``actual`` and ``expected`` are close. A wrapper function
of ``torch.testing.assert_allclose``.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
        rtol (Optional[float]): Relative tolerance. If specified, ``atol``
            must also be specified. If omitted, default values based on the
            :attr:`~torch.Tensor.dtype` are selected (see the tolerance
            table in the PyTorch documentation).
        atol (Optional[float]): Absolute tolerance. If specified, ``rtol``
            must also be specified. If omitted, default values based on the
            :attr:`~torch.Tensor.dtype` are selected (see the tolerance
            table in the PyTorch documentation).
equal_nan (bool): If ``True``, two ``NaN`` values will be considered
equal.
msg (Optional[Union[str, Callable]]): Optional error message to use if
the values of corresponding tensors mismatch. Unused when PyTorch
< 1.6.
"""
if 'parrots' not in TORCH_VERSION and \
digit_version(TORCH_VERSION) >= digit_version('1.6'):
_assert_allclose(
actual,
expected,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
msg=msg)
else:
# torch.testing.assert_allclose has no ``msg`` argument
# when PyTorch < 1.6
_assert_allclose(
actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Callable, Optional, Union
from torch.testing import assert_allclose as _assert_allclose
from mmengine.utils import TORCH_VERSION, digit_version
def assert_allclose(
actual: Any,
expected: Any,
rtol: Optional[float] = None,
atol: Optional[float] = None,
equal_nan: bool = True,
msg: Optional[Union[str, Callable]] = '',
) -> None:
"""Asserts that ``actual`` and ``expected`` are close. A wrapper function
of ``torch.testing.assert_allclose``.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
        rtol (Optional[float]): Relative tolerance. If specified, ``atol``
            must also be specified. If omitted, default values based on the
            :attr:`~torch.Tensor.dtype` are selected (see the tolerance
            table in the PyTorch documentation).
        atol (Optional[float]): Absolute tolerance. If specified, ``rtol``
            must also be specified. If omitted, default values based on the
            :attr:`~torch.Tensor.dtype` are selected (see the tolerance
            table in the PyTorch documentation).
equal_nan (bool): If ``True``, two ``NaN`` values will be considered
equal.
msg (Optional[Union[str, Callable]]): Optional error message to use if
the values of corresponding tensors mismatch. Unused when PyTorch
< 1.6.
"""
if 'parrots' not in TORCH_VERSION and \
digit_version(TORCH_VERSION) >= digit_version('1.6'):
_assert_allclose(
actual,
expected,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
msg=msg)
else:
# torch.testing.assert_allclose has no ``msg`` argument
# when PyTorch < 1.6
_assert_allclose(
actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan)
|
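# --- Usage sketch for assert_allclose above (illustrative): with
# equal_nan=True (the default), NaNs compare equal; the tolerances here
# are arbitrary.
import torch
pred = torch.tensor([1.0, 2.0, float('nan')])
target = torch.tensor([1.0 + 1e-7, 2.0, float('nan')])
assert_allclose(pred, target, rtol=1e-5, atol=1e-6,
                msg='prediction does not match target')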
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of final training epochs during
            which the data augmentation is turned off and L1 loss is used.
            Defaults to 15.
        skip_type_keys (Sequence[str], optional): Sequence of transform type
            strings to be skipped in the pipeline.
            Defaults to ('Mosaic', 'RandomAffine', 'MixUp').
"""
def __init__(
self,
num_last_epochs: int = 15,
skip_type_keys: Sequence[str] = ('Mosaic', 'RandomAffine', 'MixUp')
) -> None:
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
self._has_switched = False
def before_train_epoch(self, runner) -> None:
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.train_dataloader
model = runner.model
# TODO: refactor after mmengine using model wrapper
if is_model_wrapper(model):
model = model.module
epoch_to_be_switched = ((epoch + 1) >=
runner.max_epochs - self.num_last_epochs)
if epoch_to_be_switched and not self._has_switched:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
self._has_switched = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of final training epochs during
            which the data augmentation is turned off and L1 loss is used.
            Defaults to 15.
        skip_type_keys (Sequence[str], optional): Sequence of transform type
            strings to be skipped in the pipeline.
            Defaults to ('Mosaic', 'RandomAffine', 'MixUp').
"""
def __init__(
self,
num_last_epochs: int = 15,
skip_type_keys: Sequence[str] = ('Mosaic', 'RandomAffine', 'MixUp')
) -> None:
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
def before_train_epoch(self, runner) -> None:
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.train_dataloader
model = runner.model
# TODO: refactor after mmengine using model wrapper
if is_model_wrapper(model):
model = model.module
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
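# --- Config sketch (illustrative): how a hook like YOLOXModeSwitchHook is
# typically enabled in an mmdet config; the priority value is an assumption
# borrowed from the stock YOLOX configs.
custom_hooks = [
    dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
]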
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
|
import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
|
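# --- Usage sketch for the layer under test (illustrative): with the
# default OOV token and no mask token, index 0 is reserved for
# out-of-vocabulary strings, which is why "d" maps to 0 in the tests above.
from keras.src import layers
lookup = layers.StringLookup(vocabulary=["a", "b", "c"])
print(lookup(["a", "b", "d"]))  # -> [1, 2, 0]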
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import VFNetHead
def test_vfnet_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = VFNetHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
    if torch.cuda.is_available():
        self.cuda()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds, bbox_preds_refine = self.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                    gt_bboxes, gt_labels, img_metas,
                                    gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be
        # nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                  gt_bboxes, gt_labels, img_metas,
                                  gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
import mmcv
import torch
from mmdet.models.dense_heads import VFNetHead
def test_vfnet_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = VFNetHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
    if torch.cuda.is_available():
        self.cuda()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds, bbox_preds_refine = self.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                    gt_bboxes, gt_labels, img_metas,
                                    gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be
        # nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                  gt_bboxes, gt_labels, img_metas,
                                  gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
_base_ = ['./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py']
data_root = 'data/MOT20/'
img_scale = (1600, 896) # width, height
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(type='BatchSyncRandomResize', random_size_range=(640, 1152))
]))
train_pipeline = [
dict(
type='Mosaic',
img_scale=img_scale,
pad_val=114.0,
bbox_clip_border=True),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2),
bbox_clip_border=True),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0,
bbox_clip_border=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(
type='Resize',
scale=img_scale,
keep_ratio=True,
clip_object_border=True),
dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='MultiImageMixDataset',
dataset=dict(
type='ConcatDataset',
datasets=[
dict(
type='CocoDataset',
data_root=data_root,
ann_file='annotations/train_cocoformat.json',
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_train.json',
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_val.json',
data_prefix=dict(img='val'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
]),
]),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
data_root='data/MOT17', ann_file='annotations/train_cocoformat.json'))
test_dataloader = val_dataloader
# evaluator
val_evaluator = dict(ann_file='data/MOT17/annotations/train_cocoformat.json')
test_evaluator = val_evaluator
|
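# --- Sketch (illustrative): inspecting how the child config above merges
# with its _base_ file; the path below is a placeholder for wherever the
# config lives in the repository.
from mmengine.config import Config
cfg = Config.fromfile('configs/<path-to>/yolox_x_mot20_config.py')
print(cfg.img_scale)  # (1600, 896), overriding the inherited value
print(cfg.val_dataloader['dataset']['data_root'])  # 'data/MOT17'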
_base_ = ['./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py']
data_root = 'data/MOT20/'
img_scale = (1600, 896) # width, height
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(type='BatchSyncRandomResize', random_size_range=(640, 1152))
]))
train_pipeline = [
dict(
type='Mosaic',
img_scale=img_scale,
pad_val=114.0,
bbox_clip_border=True),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2),
bbox_clip_border=True),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0,
bbox_clip_border=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(
type='Resize',
scale=img_scale,
keep_ratio=True,
clip_object_border=True),
dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='MultiImageMixDataset',
dataset=dict(
type='ConcatDataset',
datasets=[
dict(
type='CocoDataset',
data_root=data_root,
ann_file='annotations/train_cocoformat.json',
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_train.json',
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_val.json',
data_prefix=dict(img='val'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
]),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
data_root='data/MOT17', ann_file='annotations/train_cocoformat.json'))
test_dataloader = val_dataloader
# evaluator
val_evaluator = dict(ann_file='data/MOT17/annotations/train_cocoformat.json')
test_evaluator = val_evaluator
|
"""
This is a simple application for sparse encoder: Computing embeddings.
we have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
we can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.333335876464844, 'sparsity_ratio': 0.9981543366792325}
Average non-zero dimensions: 56.33
Sparsity percentage: 99.82%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode returns, for each text, a list of (token, weight) tuples
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application for sparse encoder: Computing embeddings.
we have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
we can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.66666793823242, 'sparsity_ratio': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode returns, for each text, a list of (token, weight) tuples
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
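# --- Usage sketch (assumption: SparseEncoder exposes the similarity()
# helper from recent sentence-transformers releases; the query string is
# illustrative). The sparse embeddings above can be ranked against a query:
query_embedding = model.encode(["What does the framework generate?"])
scores = model.similarity(query_embedding, embeddings)
print(scores)  # 1 x 3 score matrix; sentence 0 should score highest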
_base_ = 'tridentnet_r50-caffe_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = 'tridentnet_r50-caffe_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
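# --- Sketch of what RandomChoiceResize does with the scales above: each
# sample gets one (w, h) pair chosen uniformly at random, then a
# keep-ratio resize. A minimal stand-in, not the mmdet implementation:
import random
scales = [(1333, 640), (1333, 672), (1333, 704), (1333, 736),
          (1333, 768), (1333, 800)]
print('resize target for this sample:', random.choice(scales))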
"""String output parser."""
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""StrOutputParser is serializable.
Returns:
True
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "output_parser"].
"""
return ["langchain", "schema", "output_parser"]
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
return "default"
def parse(self, text: str) -> str:
"""Returns the input text with no changes."""
return text
|
"""String output parser."""
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""StrOutputParser is serializable.
Returns:
True
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "output_parser"].
"""
return ["langchain", "schema", "output_parser"]
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
return "default"
def parse(self, text: str) -> str:
"""Returns the input text with no changes."""
return text
StrOutputParser.model_rebuild()
|
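# --- Usage sketch (illustrative): StrOutputParser is typically the last
# step of a chain, turning a chat-model message into a plain string; the
# chain below is commented out because it needs a real prompt and model.
# chain = prompt | chat_model | StrOutputParser()
# print(chain.invoke({"topic": "parsers"}))  # -> str, not AIMessage
parser = StrOutputParser()
print(parser.invoke("already a string"))  # parse() returns text unchanged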
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine import Config # isort:skip
cfg = Config.fromfile('tests/data/config/py_config/simple_config.py')
item5 = cfg.item1[0] + cfg.item2.a
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv import Config # isort:skip
cfg = Config.fromfile('tests/data/config/py_config/simple_config.py')
item5 = cfg.item1[0] + cfg.item2.a
|
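# --- Sketch: minimal contents that simple_config.py would need for the
# snippets above to work (a hypothetical reconstruction; the real test
# fixture may differ):
item1 = [1, 2]
item2 = dict(a=0)
# With these values, item5 == cfg.item1[0] + cfg.item2.a == 1.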
import sys
import pytest
from fastapi._compat import PYDANTIC_V2
from inline_snapshot import Snapshot
needs_py39 = pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9+")
needs_py310 = pytest.mark.skipif(
sys.version_info < (3, 10), reason="requires python3.10+"
)
needs_pydanticv2 = pytest.mark.skipif(not PYDANTIC_V2, reason="requires Pydantic v2")
needs_pydanticv1 = pytest.mark.skipif(PYDANTIC_V2, reason="requires Pydantic v1")
def pydantic_snapshot(
*,
v2: Snapshot,
v1: Snapshot, # TODO: remove v1 argument when deprecating Pydantic v1
):
"""
This function should be used like this:
>>> assert value == pydantic_snapshot(v2=snapshot(),v1=snapshot())
inline-snapshot will create the snapshots when pytest is executed for each versions of pydantic.
It is also possible to use the function inside snapshots for version-specific values.
>>> assert value == snapshot({
"data": "some data",
"version_specific": pydantic_snapshot(v2=snapshot(),v1=snapshot()),
})
"""
return v2 if PYDANTIC_V2 else v1
|
import sys
import pytest
from fastapi._compat import PYDANTIC_V2
needs_py39 = pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9+")
needs_py310 = pytest.mark.skipif(
sys.version_info < (3, 10), reason="requires python3.10+"
)
needs_pydanticv2 = pytest.mark.skipif(not PYDANTIC_V2, reason="requires Pydantic v2")
needs_pydanticv1 = pytest.mark.skipif(PYDANTIC_V2, reason="requires Pydantic v1")
|
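# --- Usage sketch for pydantic_snapshot as defined in the first variant
# above (the expected value is illustrative):
from inline_snapshot import snapshot
def test_error_shape():
    value = {"loc": ["body", "name"], "type": "missing"}
    assert value == pydantic_snapshot(
        v2=snapshot({"loc": ["body", "name"], "type": "missing"}),
        v1=snapshot(),  # left empty; inline-snapshot fills it under Pydantic v1
    )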
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.core import bbox2roi
from mmdet.models.roi_heads.bbox_heads import SABLHead
from .utils import _dummy_bbox_sampling
def test_sabl_bbox_head_loss():
"""Tests bbox head loss when truth is empty and non-empty."""
self = SABLHead(
num_classes=4,
cls_in_channels=3,
reg_in_channels=3,
cls_out_channels=3,
reg_offset_out_channels=3,
reg_cls_out_channels=3,
roi_feat_size=7)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
target_cfg = mmcv.Config(dict(pos_weight=1))
# Test bbox loss when truth is empty
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
rois = bbox2roi([res.bboxes for res in sampling_results])
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox_cls',
0) == 0, 'empty gt bbox-cls-loss should be zero'
assert losses.get('loss_bbox_reg',
0) == 0, 'empty gt bbox-reg-loss should be zero'
# Test bbox loss when truth is non-empty
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
    assert losses.get('loss_bbox_cls',
                      0) > 0, 'box-cls-loss should be non-zero'
    assert losses.get('loss_bbox_reg',
                      0) > 0, 'box-reg-loss should be non-zero'
|
import mmcv
import torch
from mmdet.core import bbox2roi
from mmdet.models.roi_heads.bbox_heads import SABLHead
from .utils import _dummy_bbox_sampling
def test_sabl_bbox_head_loss():
"""Tests bbox head loss when truth is empty and non-empty."""
self = SABLHead(
num_classes=4,
cls_in_channels=3,
reg_in_channels=3,
cls_out_channels=3,
reg_offset_out_channels=3,
reg_cls_out_channels=3,
roi_feat_size=7)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
target_cfg = mmcv.Config(dict(pos_weight=1))
# Test bbox loss when truth is empty
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
rois = bbox2roi([res.bboxes for res in sampling_results])
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox_cls',
0) == 0, 'empty gt bbox-cls-loss should be zero'
assert losses.get('loss_bbox_reg',
0) == 0, 'empty gt bbox-reg-loss should be zero'
# Test bbox loss when truth is non-empty
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
    assert losses.get('loss_bbox_cls',
                      0) > 0, 'box-cls-loss should be non-zero'
    assert losses.get('loss_bbox_reg',
                      0) > 0, 'box-reg-loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ...builder import build_loss
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
"""Base class for panoptic heads."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
loss_panoptic=None,
init_cfg=None,
**kwargs):
super(BasePanopticFusionHead, self).__init__(init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
self.num_classes = num_things_classes + num_stuff_classes
self.test_cfg = test_cfg
if loss_panoptic:
self.loss_panoptic = build_loss(loss_panoptic)
else:
self.loss_panoptic = None
@property
def with_loss(self):
"""bool: whether the panoptic head contains loss function."""
return self.loss_panoptic is not None
@abstractmethod
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""Forward function during training."""
@abstractmethod
def simple_test(self,
img_metas,
det_labels,
mask_preds,
seg_preds,
det_bboxes,
cfg=None,
**kwargs):
"""Test without augmentation."""
|
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ...builder import build_loss
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
"""Base class for panoptic heads."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
loss_panoptic=None,
init_cfg=None,
**kwargs):
super(BasePanopticFusionHead, self).__init__(init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
self.num_classes = num_things_classes + num_stuff_classes
self.test_cfg = test_cfg
if loss_panoptic:
self.loss_panoptic = build_loss(loss_panoptic)
else:
self.loss_panoptic = None
@property
def with_loss(self):
"""bool: whether the panoptic head contains loss function."""
return self.loss_panoptic is not None
@abstractmethod
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""Forward function during training."""
@abstractmethod
def simple_test(self,
img_metas,
det_labels,
mask_preds,
seg_preds,
det_bboxes,
cfg=None,
**kwargs):
"""Test without augmentation."""
|
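# --- Sketch (illustrative): the smallest concrete subclass the abstract
# head above allows; real fusion heads implement actual fusion logic.
class DummyFusionHead(BasePanopticFusionHead):
    def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
        # No panoptic loss configured, so there is nothing to train.
        return dict()
    def simple_test(self, img_metas, det_labels, mask_preds, seg_preds,
                    det_bboxes, cfg=None, **kwargs):
        # Echo the detections back unchanged.
        return dict(det_labels=det_labels, det_bboxes=det_bboxes)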
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDocument
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.video import VideoTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDocument):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_video_tensor_tensorflow():
class MyVideoDoc(BaseDocument):
tensor: VideoTensorFlowTensor
doc = MyVideoDoc(tensor=tf.zeros((1, 224, 224, 3)))
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1, 224, 224, 3)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(VideoTensorFlowTensor, np.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, torch.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._proto_type_name,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
proto = tensor._to_node_protobuf()
assert VideoTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_bytes(video_tensor, tmpdir):
b = video_tensor.to_bytes()
    assert isinstance(b, bytes)
@pytest.mark.tensorflow
def test_save_video_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDocument
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.video import VideoTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDocument):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_video_tensor_tensorflow():
class MyVideoDoc(BaseDocument):
tensor: VideoTensorFlowTensor
doc = MyVideoDoc(tensor=tf.zeros((1, 224, 224, 3)))
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1, 224, 224, 3)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(VideoTensorFlowTensor, np.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, torch.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._proto_type_name,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
proto = tensor._to_node_protobuf()
assert VideoTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_video_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
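# --- Usage sketch for the tensor types under test (illustrative):
# validation enforces a (frames, height, width, 3) layout, which is why
# shapes like (1, 224, 224, 100) are rejected in the tests above. Saving
# to mp4 requires an av-capable environment.
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import VideoNdArray
video = parse_obj_as(VideoNdArray, np.zeros((2, 224, 224, 3)))
video.save('clip.mp4')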
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Text2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
|
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Text2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
def test_lumina_prompt_embeds(self):
pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
output_with_prompt = pipe(**inputs).images[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = inputs.pop("prompt")
do_classifier_free_guidance = inputs["guidance_scale"] > 1
(
prompt_embeds,
prompt_attention_mask,
negative_prompt_embeds,
negative_prompt_attention_mask,
) = pipe.encode_prompt(
prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
device=torch_device,
)
output_with_embeds = pipe(
prompt_embeds=prompt_embeds,
prompt_attention_mask=prompt_attention_mask,
**inputs,
).images[0]
max_diff = np.abs(output_with_prompt - output_with_embeds).max()
assert max_diff < 1e-4
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.evaluator import DumpResults
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNERS
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--out',
type=str,
help='dump predictions to a pickle file for offline evaluation')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# add `DumpResults` dummy metric
if args.out is not None:
assert args.out.endswith(('.pkl', '.pickle')), \
'The dump file must be a pkl file.'
runner.test_evaluator.metrics.append(
DumpResults(out_file_path=args.out))
# start testing
runner.test()
if __name__ == '__main__':
main()
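# A minimal offline-inspection sketch (illustrative, not part of the script
# above): with `--out results.pkl`, DumpResults pickles the collected
# predictions, so the file can be loaded afterwards; the path is hypothetical.
#
#   import pickle
#   with open('results.pkl', 'rb') as f:
#       results = pickle.load(f)  # typically one prediction entry per image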
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNERS
from mmdet.utils import add_dump_metric, register_all_modules
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--out',
type=str,
help='dump predictions to a pickle file for offline evaluation')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# Dump predictions
if args.out is not None:
assert args.out.endswith(('.pkl', '.pickle')), \
'The dump file must be a pkl file.'
add_dump_metric(args, cfg)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start testing
runner.test()
if __name__ == '__main__':
main()
|
_base_ = '../ssd/ssd512_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../ssd/ssd512_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
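# A minimal standalone usage sketch mirroring the fixtures above (illustrative:
# the weights are random, so the embeddings carry no semantic meaning).
if __name__ == "__main__":
    demo_tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
    demo_embedding = StaticEmbedding(demo_tokenizer, embedding_weights=np.random.rand(30522, 768))
    demo_model = SentenceTransformer(modules=[demo_embedding])
    print(demo_model.encode(["Hello world!"]).shape)  # (1, 768)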
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user() -> User:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser#example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
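# A wiring sanity check for the graph above (illustrative; str.format stands in
# for the block's own template syntax): with input_1="Hello", input_2="World"
# and the default c="!!!", the rendered text should be "Hello, World!!!".
#
#   assert "{a}, {b}{c}".format(a="Hello", b="World", c="!!!") == "Hello, World!!!"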
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user() -> User:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser#example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{a}, {b}{c}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of the parameters. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
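# A minimal config-side sketch of enabling this hook (illustrative; it mirrors
# the PISA SSD config earlier in this document, and ``grad_clip`` is forwarded
# verbatim to ``torch.nn.utils.clip_grad_norm_``):
#
#   default_hooks = dict(
#       optimizer=dict(
#           type='OptimizerHook',
#           grad_clip=dict(max_norm=35, norm_type=2)))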
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(
self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""All operations need to be finished after each training iteration.
This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of the parameters. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (Sequence[BaseDataSample], optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
from jina.serve.runtimes.gateway.websocket.gateway import WebSocketGateway
|
import asyncio
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
from jina.serve.runtimes.gateway.websocket.gateway import WebSocketGateway
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime for Websocket interface."""
async def async_setup(self):
"""
The async method to set up the runtime.
Set up the uvicorn server.
"""
self.gateway = WebSocketGateway(
name=self.name,
port=self.args.port,
ssl_keyfile=self.args.ssl_keyfile,
ssl_certfile=self.args.ssl_certfile,
uvicorn_kwargs=self.args.uvicorn_kwargs,
logger=self.logger,
)
self.gateway.set_streamer(
args=self.args,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
runtime_name=self.args.name,
)
await self.gateway.setup_server()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self.gateway.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self.gateway.teardown()
async def async_cancel(self):
"""Stop the server."""
await self.gateway.stop_server()
async def async_run_forever(self):
"""Running method of ther server."""
await self.gateway.run_server()
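# The lifecycle these methods implement, in order (an illustrative sketch, not
# jina's actual orchestration code; ``runtime`` is a hypothetical instance):
#
#   await runtime.async_setup()        # build WebSocketGateway, set up uvicorn
#   serve = asyncio.create_task(runtime.async_run_forever())
#   await runtime._wait_for_cancel()   # polls is_cancel/should_exit, then stops
#   await serve
#   await runtime.async_teardown()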
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
# Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = datapoints.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with datapoint classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`datapoint_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all datapoints are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
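# %%
# As a rough illustration of that flatten / transform / repack round-trip
# (this is **not** the torchvision implementation, and ``torch.utils._pytree``
# is a private helper module):

from torch.utils._pytree import tree_flatten, tree_unflatten

flat, spec = tree_flatten(structured_input)  # flat list of leaves + structure spec
# apply a dummy transform to the tensor leaves only; every other entry
# (the label, the string, ...) is passed through unchanged
flat = [leaf.flip(-1) if isinstance(leaf, torch.Tensor) else leaf for leaf in flat]
repacked = tree_unflatten(flat, spec)  # same nested structure as the input
print(repacked.keys())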
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
# Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = datapoints.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with datapoint classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`datapoint_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all datapoints are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on devices other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.preemphasis, (waveform, coeff))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.deemphasis, (waveform, coeff))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.frequency_impulse_response, (mags,))
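def test_scripting_pattern(self):
    """Illustrative sketch of the pattern ``_assert_consistency`` builds on
    (not an original test): eager and scripted outputs must match."""
    def double(x: torch.Tensor) -> torch.Tensor:
        return x * 2.0
    ts_double = torch_script(double)  # roughly torch.jit.script + save/load
    x = torch.rand(3, device=self.device, dtype=self.dtype)
    self.assertEqual(ts_double(x), double(x))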
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on devices other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.preemphasis, (waveform, coeff))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.deemphasis, (waveform, coeff))
|
from __future__ import annotations
from typing import Any
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Image(TVTensor):
""":class:`torch.Tensor` subclass for images with shape ``[..., C, H, W]``.
.. note::
In the :ref:`transforms <transforms>`, ``Image`` instances are largely
interchangeable with pure :class:`torch.Tensor`. See
:ref:`this note <passthrough_heuristic>` for more details.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: torch.dtype | None = None,
device: torch.device | str | int | None = None,
requires_grad: bool | None = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
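# A minimal usage sketch of the ``ndim`` handling above (illustrative; the
# public import path is assumed to be torchvision's ``tv_tensors`` namespace):
#
#   from torchvision import tv_tensors
#   img = tv_tensors.Image(torch.rand(16, 16))  # 2D (H, W) input
#   img.shape                                   # torch.Size([1, 16, 16])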
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Image(TVTensor):
""":class:`torch.Tensor` subclass for images with shape ``[..., C, H, W]``.
.. note::
In the :ref:`transforms <transforms>`, ``Image`` instances are largely
interchangeable with pure :class:`torch.Tensor`. See
:ref:`this note <passthrough_heuristic>` for more details.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
MATCH_COST = Registry('Match Cost')
def build_match_cost(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, MATCH_COST, default_args)
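# A minimal registry round-trip sketch (illustrative; ``MyCost`` is a
# hypothetical class, not part of mmdet):
#
#   @MATCH_COST.register_module()
#   class MyCost:
#       def __init__(self, weight=1.0):
#           self.weight = weight
#
#   cost = build_match_cost(dict(type='MyCost', weight=2.0))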
|
from mmcv.utils import Registry, build_from_cfg
MATCH_COST = Registry('Match Cost')
def build_match_cost(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, MATCH_COST, default_args)
|
__version__ = '0.17.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.17.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
def test_pod_port_cast(port, expected_port):
args = set_pod_parser().parse_args(['--port'] + port)
assert args.port == expected_port
def test_pod_port_default():
args = set_pod_parser().parse_args([])
assert isinstance(args.port, list)
assert len(args.port) == 1
@pytest.mark.parametrize(
'host,expected_host',
[
(['localhost'], ['localhost']),
(['0.0.0.0,localhost'], ['0.0.0.0', 'localhost']),
(['0.0.0.0,localhost', '127.0.0.1'], ['0.0.0.0', 'localhost', '127.0.0.1']),
],
)
def test_pod_host_cast(host, expected_host):
args = set_pod_parser().parse_args(['--host'] + host)
assert args.host == expected_host
def test_pod_host_default():
from jina.constants import __default_host__
args = set_pod_parser().parse_args([])
assert args.host == [__default_host__]
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert len(args.port) == 1
assert args.protocol == [GatewayProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
def test_pod_port_cast(port, expected_port):
args = set_pod_parser().parse_args(['--port'] + port)
assert args.port == expected_port
def test_pod_port_default():
args = set_pod_parser().parse_args([])
assert isinstance(args.port, list)
assert len(args.port) == 1
@pytest.mark.parametrize(
'host,expected_host',
[
(['localhost'], ['localhost']),
(['0.0.0.0,localhost'], ['0.0.0.0', 'localhost']),
(['0.0.0.0,localhost', '127.0.0.1'], ['0.0.0.0', 'localhost', '127.0.0.1']),
],
)
def test_pod_host_cast(host, expected_host):
args = set_pod_parser().parse_args(['--host'] + host)
assert args.host == expected_host
def test_pod_host_default():
from jina.constants import __default_host__
args = set_pod_parser().parse_args([])
assert args.host == [__default_host__]
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert args.port is None
assert args.protocol == [GatewayProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmengine.model import BaseModule
from mmdet.models.backbones import ResNet
from mmdet.models.layers import ResLayer as _ResLayer
from mmdet.registry import MODELS
@MODELS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
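# A worked example of the channel arithmetic above (derived, not from the
# source): for depth=50, ResNet.arch_settings gives Bottleneck blocks with
# expansion 4, so at stage=3:
#
#   planes   = 64 * 2**3       # 512
#   inplanes = 64 * 2**2 * 4   # 1024 (output width of stage 2)
#   out_channels = planes * 4  # 2048, the usual ResNet-50 C5 width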
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmengine.model import BaseModule
from mmdet.models.backbones import ResNet
from mmdet.models.utils import ResLayer as _ResLayer
from mmdet.registry import MODELS
@MODELS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_tood_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# since Focal Loss is not supported on CPU
self = TOODHead(
num_classes=80,
in_channels=1,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
train_cfg=train_cfg,
test_cfg=test_cfg)
self.init_weights()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds = self(feat)
# test initial assigner and losses
self.epoch = 0
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
# test task alignment assigner and losses
self.epoch = 10
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_tood_head_loss():
    """Tests TOOD head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
    # the activated=True loss variants are used below since Focal Loss is not
    # supported on CPU
self = TOODHead(
num_classes=80,
in_channels=1,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
train_cfg=train_cfg,
test_cfg=test_cfg)
self.init_weights()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds = self(feat)
# test initial assigner and losses
self.epoch = 0
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
# test task alignment assigner and losses
self.epoch = 10
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
|
__version__ = '0.13.22'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
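# Added note (hedged): the rich traceback handler above is opt-in and must be
# enabled via the environment before docarray is imported, e.g.
#
#   DA_RICH_HANDLER=1 python my_app.py
#
# or, programmatically, before the import:
#
#   import os
#   os.environ['DA_RICH_HANDLER'] = '1'
#   import docarray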
|
__version__ = '0.13.21'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.pretrained_model_name_or_path == 'distilbert-base-uncased'
def test_tf_batch(docs_generator):
encoder = TransformerTFTextEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (target_dim,)
@pytest.mark.gpu
def test_encoder_gpu(docs_generator):
encoder = TransformerTFTextEncoder(device='/GPU:0')
    docs = DocumentArray([Document(text='random text')])
encoder.encode(docs, {})
assert len(docs.get_attributes('embedding')) == 1
assert docs[0].embedding.shape == (target_dim,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTFTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(
pytest.lazy_fixture('docs_with_text'),
[[['r'], 10], [['c'], 0], [['cc'], 0]],
['r'],
),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[[['r'], 0], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[[['r'], 0], [['c'], 0], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
encoder = TransformerTFTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': traversal_path})
for path, count in docs_per_path:
embeddings = docs.traverse_flat(path).get_attributes("embedding")
assert len([en for en in embeddings if en is not None]) == count
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.pretrained_model_name_or_path == 'distilbert-base-uncased'
def test_tf_batch(docs_generator):
encoder = TransformerTFTextEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (target_dim,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTFTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(
pytest.lazy_fixture('docs_with_text'),
[[['r'], 10], [['c'], 0], [['cc'], 0]],
['r'],
),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[[['r'], 1], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[[['r'], 1], [['c'], 1], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
encoder = TransformerTFTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': traversal_path})
for path, count in docs_per_path:
assert len(docs.traverse_flat(path).get_attributes("embedding")) == count
|
import time
from typing import Callable
from pydantic import Field
from docarray import BaseDoc
from docarray.typing import NdArray
N_DIM = 10
class SimpleSchema(BaseDoc):
text: str = Field(index_name='text_index')
number: int
embedding: NdArray[10] = Field(dim=10, index_name="vector_index")
class SimpleDoc(BaseDoc):
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index_1")
class NestedDoc(BaseDoc):
d: SimpleDoc
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index")
class FlatSchema(BaseDoc):
embedding1: NdArray = Field(dim=N_DIM, index_name="vector_index_1")
embedding2: NdArray = Field(dim=N_DIM, index_name="vector_index_2")
def assert_when_ready(callable: Callable, tries: int = 10, interval: float = 2):
"""
Retry callable to account for time taken to change data on the cluster
"""
while True:
try:
callable()
except AssertionError as e:
tries -= 1
if tries == 0:
raise RuntimeError("Retries exhausted.") from e
time.sleep(interval)
else:
return
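if __name__ == '__main__':
    # Usage sketch (added example): retry an assertion that only becomes true
    # once the cluster catches up. Here a timer stands in for a real
    # cluster-side condition.
    import time as _time
    start = _time.monotonic()

    def _eventually_true():
        assert _time.monotonic() - start > 3, 'not ready yet'

    assert_when_ready(_eventually_true, tries=5, interval=2)
    print('condition became true after retries')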
|
import time
from typing import Callable
from pydantic import Field
from docarray import BaseDoc
from docarray.typing import NdArray
N_DIM = 10
class SimpleSchema(BaseDoc):
text: str = Field(index_name='text_index')
number: int
embedding: NdArray[10] = Field(dim=10, index_name="vector_index")
class SimpleDoc(BaseDoc):
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index_1")
class NestedDoc(BaseDoc):
d: SimpleDoc
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index")
class FlatSchema(BaseDoc):
embedding1: NdArray = Field(dim=N_DIM, index_name="vector_index_1")
embedding2: NdArray = Field(dim=N_DIM, index_name="vector_index_2")
def assert_when_ready(callable: Callable, tries: int = 5, interval: float = 2):
"""
Retry callable to account for time taken to change data on the cluster
"""
while True:
try:
callable()
except AssertionError as e:
tries -= 1
if tries == 0:
raise RuntimeError("Retries exhausted.") from e
time.sleep(interval)
else:
return
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
rng = da.random.default_rng(1)
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
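# Added note (hedged): `dxgb.predict` also accepts plain dask collections, so
# fresh data can be scored without building another DaskDMatrix, e.g. inside
# main():
#
#   X_new = da.random.random(size=(1000, n), chunks=100)
#   preds = dxgb.predict(client, bst, X_new)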
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m,), chunks=100)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""langchain-core version information and utilities."""
VERSION = "0.3.59"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.58"
|
"""
This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
import logging
from datetime import datetime
import sys
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/adaptive_layer_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text
# columns and one similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, inner_train_loss)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="adaptive-layer-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
"""
This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
import logging
from datetime import datetime
import sys
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/adaptive_layer_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, inner_train_loss)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="adaptive-layer-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
_base_ = './cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
_base_ = './cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook', 'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook', 'NaiveVisualizationHook', 'EMAHook'
]
|
import numpy as np
import pytest
import lightgbm
@pytest.fixture(scope="function")
def missing_module_cffi(monkeypatch):
"""Mock 'cffi' not being importable"""
monkeypatch.setattr(lightgbm.compat, "CFFI_INSTALLED", False)
monkeypatch.setattr(lightgbm.basic, "CFFI_INSTALLED", False)
@pytest.fixture(scope="function")
def rng():
return np.random.default_rng()
@pytest.fixture(scope="function")
def rng_fixed_seed():
return np.random.default_rng(seed=42)
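# Added example (hypothetical test; in practice this would live in a test
# module, not in conftest.py): pytest injects fixtures by parameter name, so a
# test can request `rng_fixed_seed` and rely on reproducible draws.
def test_rng_fixed_seed_is_reproducible(rng_fixed_seed):
    draws = rng_fixed_seed.random(3)
    expected = np.random.default_rng(seed=42).random(3)
    assert np.allclose(draws, expected)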
|
import numpy as np
import pytest
@pytest.fixture(scope="function")
def rng():
return np.random.default_rng()
@pytest.fixture(scope="function")
def rng_fixed_seed():
return np.random.default_rng(seed=42)
|
from keras.src.backend.common.tensor_attributes import get_tensor_attr
from keras.src.backend.common.tensor_attributes import set_tensor_attr
def set_keras_mask(x, mask):
return set_tensor_attr(x, "_keras_mask", mask)
def get_keras_mask(x):
return get_tensor_attr(x, "_keras_mask")
|
import weakref
from keras.src.backend.common import global_state
def set_keras_mask(x, mask):
try:
x._keras_mask = mask
except AttributeError:
if mask is None:
return
mask_dict = global_state.get_global_attribute("keras_mask_dict")
if mask_dict is None:
mask_dict = weakref.WeakValueDictionary()
global_state.set_global_attribute("keras_mask_dict", mask_dict)
mask_dict[id(x)] = mask
def get_keras_mask(x):
if not hasattr(x, "_keras_mask"):
mask_dict = global_state.get_global_attribute("keras_mask_dict")
if mask_dict is not None:
return mask_dict.get(id(x), None)
return getattr(x, "_keras_mask", None)
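if __name__ == "__main__":
    # Usage sketch (added example): objects such as numpy arrays reject new
    # attributes, so `set_keras_mask` falls back to the id-keyed
    # WeakValueDictionary and `get_keras_mask` reads the mask back from there.
    import numpy as np

    x = np.zeros((2, 3))
    mask = np.array([True, False])
    set_keras_mask(x, mask)  # AttributeError path -> global mask dict
    assert get_keras_mask(x) is mask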
|
"""Test Azure AI Search wrapper."""
from langchain_core.documents import Document
from langchain_community.retrievers.azure_ai_search import (
AzureAISearchRetriever,
AzureCognitiveSearchRetriever,
)
def test_azure_ai_search_invoke() -> None:
"""Test valid call to Azure AI Search.
In order to run this test, you should provide
    a `service_name`, an `index_name` and
    an azure search `api_key` or `azure_ad_token`
    as arguments for the AzureAISearchRetriever in both tests.
    api_version, aiosession and top_k are optional parameters.
"""
retriever = AzureAISearchRetriever()
documents = retriever.invoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
retriever = AzureAISearchRetriever(top_k=1)
documents = retriever.invoke("what is langchain?")
assert len(documents) <= 1
async def test_azure_ai_search_ainvoke() -> None:
"""Test valid async call to Azure AI Search.
In order to run this test, you should provide
a `service_name`, an 'index_name' and
an azure search `api_key` or 'azure_ad_token'
as arguments for the AzureAISearchRetriever.
"""
retriever = AzureAISearchRetriever()
documents = await retriever.ainvoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
def test_azure_cognitive_search_invoke() -> None:
"""Test valid call to Azure Cognitive Search.
This is to test backwards compatibility of the retriever
"""
retriever = AzureCognitiveSearchRetriever()
documents = retriever.invoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
retriever = AzureCognitiveSearchRetriever(top_k=1)
documents = retriever.invoke("what is langchain?")
assert len(documents) <= 1
async def test_azure_cognitive_search_ainvoke() -> None:
"""Test valid async call to Azure Cognitive Search.
This is to test backwards compatibility of the retriever
"""
retriever = AzureCognitiveSearchRetriever()
documents = await retriever.ainvoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
|
"""Test Azure AI Search wrapper."""
from langchain_core.documents import Document
from langchain_community.retrievers.azure_ai_search import (
AzureAISearchRetriever,
AzureCognitiveSearchRetriever,
)
def test_azure_ai_search_invoke() -> None:
"""Test valid call to Azure AI Search.
In order to run this test, you should provide
a `service_name`, azure search `api_key` and an `index_name`
as arguments for the AzureAISearchRetriever in both tests.
    api_version, aiosession and top_k are optional parameters.
"""
retriever = AzureAISearchRetriever()
documents = retriever.invoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
retriever = AzureAISearchRetriever(top_k=1)
documents = retriever.invoke("what is langchain?")
assert len(documents) <= 1
async def test_azure_ai_search_ainvoke() -> None:
"""Test valid async call to Azure AI Search.
In order to run this test, you should provide
a `service_name`, azure search `api_key` and an `index_name`
as arguments for the AzureAISearchRetriever.
"""
retriever = AzureAISearchRetriever()
documents = await retriever.ainvoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
def test_azure_cognitive_search_invoke() -> None:
"""Test valid call to Azure Cognitive Search.
This is to test backwards compatibility of the retriever
"""
retriever = AzureCognitiveSearchRetriever()
documents = retriever.invoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
retriever = AzureCognitiveSearchRetriever(top_k=1)
documents = retriever.invoke("what is langchain?")
assert len(documents) <= 1
async def test_azure_cognitive_search_ainvoke() -> None:
"""Test valid async call to Azure Cognitive Search.
This is to test backwards compatibility of the retriever
"""
retriever = AzureCognitiveSearchRetriever()
documents = await retriever.ainvoke("what is langchain?")
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[ProviderName]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(self, scopes: list[str], state: str) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
def exchange_code_for_tokens(
self, code: str, scopes: list[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return self._refresh_tokens(credentials)
def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(
f"Using default scopes for provider {self.PROVIDER_NAME.value}"
)
scopes = self.DEFAULT_SCOPES
return scopes
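# Added sketch: a minimal, hypothetical concrete handler illustrating the
# contract above. The provider choice, endpoint URL and scopes are assumptions
# for illustration; a real handler would perform the HTTP token exchange.
class ExampleOAuthHandler(BaseOAuthHandler):
    PROVIDER_NAME = ProviderName.GITHUB  # stand-in; any ProviderName member
    DEFAULT_SCOPES = ["read"]

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

    def get_login_url(self, scopes: list[str], state: str) -> str:
        scopes = self.handle_default_scopes(scopes)
        return (
            "https://example.com/oauth/authorize"
            f"?client_id={self.client_id}&scope={'+'.join(scopes)}&state={state}"
        )

    def exchange_code_for_tokens(
        self, code: str, scopes: list[str]
    ) -> OAuth2Credentials:
        raise NotImplementedError("token exchange is provider-specific")

    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
        raise NotImplementedError("token refresh is provider-specific")

    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        return False  # this hypothetical provider does not support revocation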
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar
from backend.data.model import OAuth2Credentials
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[str]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(self, scopes: list[str], state: str) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
def exchange_code_for_tokens(
self, code: str, scopes: list[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return self._refresh_tokens(credentials)
def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(f"Using default scopes for provider {self.PROVIDER_NAME}")
scopes = self.DEFAULT_SCOPES
return scopes
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
]
# TODO: Ask Tom whether LabelAccuracyEvaluator and ParaphraseMiningEvaluator are important
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluatorDataFrame import (
SparseMSEEvaluatorDataFrame,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseMSEEvaluatorDataFrame",
]
# TODO: SparseMSEEvaluatorDataFrame: for now, sparse embeddings are handled with numpy because of
#       `trg_embeddings = np.asarray(self.embed_inputs(model, trg_sentences))` in MSEEvaluatorFromDataFrame;
#       check whether this is needed and adapt to it
# TODO: Adapt ParaphraseMiningEvaluator to handle a sparse override (there are a lot of functions to check, especially in utils)
# TODO: Check whether a sparse version of LabelAccuracyEvaluator is possible (not yet clear how to adapt it)
|
_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(
embed_dims=192,
num_heads=[6, 12, 24, 48],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536]))
train_dataloader = dict(batch_size=1, num_workers=1)
# learning policy
max_iters = 737500
param_scheduler = dict(end=max_iters, milestones=[655556, 710184])
# Before the 735001st iteration, we do evaluation every 5000 iterations.
# After the 735000th iteration, we do evaluation every 737500 iterations,
# which means that we do evaluation at the end of training.
interval = 5000
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
train_cfg = dict(
max_iters=max_iters,
val_interval=interval,
dynamic_intervals=dynamic_intervals)
|
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(
embed_dims=192,
num_heads=[6, 12, 24, 48],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536]))
train_dataloader = dict(batch_size=1, num_workers=1)
# learning policy
max_iters = 737500
param_scheduler = dict(end=max_iters, milestones=[655556, 710184])
# Before the 735001st iteration, we do evaluation every 5000 iterations.
# After the 735000th iteration, we do evaluation every 737500 iterations,
# which means that we do evaluation at the end of training.
interval = 5000
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
train_cfg = dict(
max_iters=max_iters,
val_interval=interval,
dynamic_intervals=dynamic_intervals)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
AutoencoderKLWan,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class AutoencoderKLWanSingleFileTests(unittest.TestCase):
model_class = AutoencoderKLWan
ckpt_path = (
"https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors"
)
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="vae")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
AutoencoderKLWan,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class AutoencoderKLWanSingleFileTests(unittest.TestCase):
model_class = AutoencoderKLWan
ckpt_path = (
"https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors"
)
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="vae")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
|
import pytest
from langchain_openai import ChatOpenAI, OpenAI
_EXPECTED_NUM_TOKENS = {
"ada": 17,
"babbage": 17,
"curie": 17,
"davinci": 17,
"gpt-4": 12,
"gpt-4-32k": 12,
"gpt-3.5-turbo": 12,
"o1": 12,
"o3": 12,
"gpt-4o": 11,
}
_MODELS = ["ada", "babbage", "curie", "davinci"]
_CHAT_MODELS = ["gpt-4", "gpt-4-32k", "gpt-3.5-turbo", "o1", "o3", "gpt-4o"]
@pytest.mark.xfail(reason="Old models require different tiktoken cached file")
@pytest.mark.parametrize("model", _MODELS)
def test_openai_get_num_tokens(model: str) -> None:
"""Test get_tokens."""
llm = OpenAI(model=model)
assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model]
@pytest.mark.parametrize("model", _CHAT_MODELS)
def test_chat_openai_get_num_tokens(model: str) -> None:
"""Test get_tokens."""
llm = ChatOpenAI(model=model)
assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model]
|
import pytest
from langchain_openai import ChatOpenAI, OpenAI
_EXPECTED_NUM_TOKENS = {
"ada": 17,
"babbage": 17,
"curie": 17,
"davinci": 17,
"gpt-4": 12,
"gpt-4-32k": 12,
"gpt-3.5-turbo": 12,
}
_MODELS = ["ada", "babbage", "curie", "davinci"]
_CHAT_MODELS = ["gpt-4", "gpt-4-32k", "gpt-3.5-turbo"]
@pytest.mark.xfail(reason="Old models require different tiktoken cached file")
@pytest.mark.parametrize("model", _MODELS)
def test_openai_get_num_tokens(model: str) -> None:
"""Test get_tokens."""
llm = OpenAI(model=model)
assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model]
@pytest.mark.parametrize("model", _CHAT_MODELS)
def test_chat_openai_get_num_tokens(model: str) -> None:
"""Test get_tokens."""
llm = ChatOpenAI(model=model)
assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model]
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoSox, TorchaudioTestCase
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoSox,
TorchaudioTestCase,
)
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
"""Utility functions for validating Ollama models."""
from httpx import ConnectError
from ollama import Client, ResponseError
def validate_model(client: Client, model_name: str) -> None:
"""Validate that a model exists in the Ollama instance.
Args:
client: The Ollama client.
model_name: The name of the model to validate.
Raises:
ValueError: If the model is not found or if there's a connection issue.
"""
try:
response = client.list()
model_names: list[str] = [model["name"] for model in response["models"]]
if not any(
model_name == m or m.startswith(f"{model_name}:") for m in model_names
):
msg = (
f"Model `{model_name}` not found in Ollama. Please pull the "
f"model (using `ollama pull {model_name}`) or specify a valid "
f"model name. Available local models: {', '.join(model_names)}"
)
raise ValueError(msg)
except ConnectError as e:
msg = (
"Connection to Ollama failed. Please make sure Ollama is running "
f"and accessible at {client._client.base_url}. "
)
raise ValueError(msg) from e
except ResponseError as e:
msg = (
"Received an error from the Ollama API. "
"Please check your Ollama server logs."
)
raise ValueError(msg) from e
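if __name__ == "__main__":
    # Usage sketch (added example): validate that a local model exists before
    # wiring it into a chat wrapper. The host and model name are assumptions.
    client = Client(host="http://localhost:11434")
    validate_model(client, "llama3")
    print("model is available")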
|
"""Utility functions for validating Ollama models."""
from httpx import ConnectError
from ollama import Client, ResponseError
def validate_model(client: Client, model_name: str) -> None:
"""Validate that a model exists in the Ollama instance.
Args:
client: The Ollama client.
model_name: The name of the model to validate.
Raises:
ValueError: If the model is not found or if there's a connection issue.
"""
try:
response = client.list()
model_names: list[str] = [model["name"] for model in response["models"]]
if not any(
model_name == m or m.startswith(f"{model_name}:") for m in model_names
):
raise ValueError(
f"Model `{model_name}` not found in Ollama. Please pull the "
f"model (using `ollama pull {model_name}`) or specify a valid "
f"model name. Available local models: {', '.join(model_names)}"
)
except ConnectError as e:
raise ValueError(
"Connection to Ollama failed. Please make sure Ollama is running "
f"and accessible at {client._client.base_url}. "
) from e
except ResponseError as e:
raise ValueError(
"Received an error from the Ollama API. "
"Please check your Ollama server logs."
) from e
|
"""Tool for the Wikipedia API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaQueryInput(BaseModel):
"""Input for the WikipediaQuery tool."""
query: str = Field(description="query to look up on wikipedia")
class WikipediaQueryRun(BaseTool):
"""Tool that searches the Wikipedia API."""
name: str = "wikipedia"
description: str = (
"A wrapper around Wikipedia. "
"Useful for when you need to answer general questions about "
"people, places, companies, facts, historical events, or other subjects. "
"Input should be a search query."
)
api_wrapper: WikipediaAPIWrapper
args_schema: Type[BaseModel] = WikipediaQueryInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Wikipedia tool."""
return self.api_wrapper.run(query)
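if __name__ == "__main__":
    # Usage sketch (added example): wire the tool to the API wrapper and invoke
    # it directly; requires the `wikipedia` package to be installed.
    tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
    print(tool.run("Alan Turing"))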
|
"""Tool for the Wikipedia API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaQueryInput(BaseModel):
"""Input for the WikipediaQuery tool."""
query: str = Field(description="query to look up on wikipedia")
class WikipediaQueryRun(BaseTool): # type: ignore[override, override]
"""Tool that searches the Wikipedia API."""
name: str = "wikipedia"
description: str = (
"A wrapper around Wikipedia. "
"Useful for when you need to answer general questions about "
"people, places, companies, facts, historical events, or other subjects. "
"Input should be a search query."
)
api_wrapper: WikipediaAPIWrapper
args_schema: Type[BaseModel] = WikipediaQueryInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Wikipedia tool."""
return self.api_wrapper.run(query)
|
_base_ = 'mask-rcnn_r50_fpg_crop640-50e_coco.py'
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128),
mask_roi_extractor=dict(out_channels=128),
mask_head=dict(in_channels=128)))
|
_base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py'
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128),
mask_roi_extractor=dict(out_channels=128),
mask_head=dict(in_channels=128)))
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of TOOD. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of TOOD. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
import os
from abc import abstractmethod
from unittest import mock
import pytest
from langchain_core.embeddings import Embeddings
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class EmbeddingsTests(BaseStandardTests):
"""
:private:
"""
@property
@abstractmethod
def embeddings_class(self) -> type[Embeddings]: ...
@property
def embedding_model_params(self) -> dict:
return {}
@pytest.fixture
def model(self) -> Embeddings:
return self.embeddings_class(**self.embedding_model_params)
class EmbeddingsUnitTests(EmbeddingsTests):
"""Base class for embeddings unit tests.
Test subclasses must implement the ``embeddings_class`` property to specify the
embeddings model to be tested. You can also override the
``embedding_model_params`` property to specify initialization parameters.
Example:
.. code-block:: python
from typing import Type
from langchain_tests.unit_tests import EmbeddingsUnitTests
from my_package.embeddings import MyEmbeddingsModel
class TestMyEmbeddingsModelUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[MyEmbeddingsModel]:
# Return the embeddings model class to test here
return MyEmbeddingsModel
@property
def embedding_model_params(self) -> dict:
# Return initialization parameters for the model.
return {"model": "model-001"}
.. note::
API references for individual test methods include troubleshooting tips.
Testing initialization from environment variables
Overriding the ``init_from_env_params`` property will enable additional tests
for initialization from environment variables. See below for details.
.. dropdown:: init_from_env_params
This property is used in unit tests to test initialization from
environment variables. It should return a tuple of three dictionaries
that specify the environment variables, additional initialization args,
and expected instance attributes to check.
Defaults to empty dicts. If not overridden, the test is skipped.
Example:
.. code-block:: python
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"MY_API_KEY": "api_key",
},
{
"model": "model-001",
},
{
"my_api_key": "api_key",
},
)
""" # noqa: E501
def test_init(self) -> None:
"""Test model initialization.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``embedding_model_params`` is specified
and the model can be initialized from those params.
"""
model = self.embeddings_class(**self.embedding_model_params)
assert model is not None
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""This property is used in unit tests to test initialization from environment
variables. It should return a tuple of three dictionaries that specify the
environment variables, additional initialization args, and expected instance
attributes to check."""
return {}, {}, {}
def test_init_from_env(self) -> None:
"""Test initialization from environment variables. Relies on the
``init_from_env_params`` property. Test is skipped if that property is not
set.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``init_from_env_params`` is specified
correctly and that model parameters are properly set from environment
variables during initialization.
"""
env_params, embeddings_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
model = self.embeddings_class(**embeddings_params)
assert model is not None
for k, expected in expected_attrs.items():
actual = getattr(model, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
|
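# Hedged sketch combining the two docstring examples above into one test class;
# `my_package` and all names below are the docstring's illustrative placeholders.
from my_package.embeddings import MyEmbeddingsModel  # hypothetical package

class TestMyEmbeddingsModelUnit(EmbeddingsUnitTests):
    @property
    def embeddings_class(self) -> type[MyEmbeddingsModel]:
        return MyEmbeddingsModel

    @property
    def embedding_model_params(self) -> dict:
        return {"model": "model-001"}

    @property
    def init_from_env_params(self) -> tuple[dict, dict, dict]:
        # (env vars, extra init kwargs, expected instance attributes)
        return (
            {"MY_API_KEY": "api_key"},
            {"model": "model-001"},
            {"my_api_key": "api_key"},
        )
|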
import os
from abc import abstractmethod
from typing import Tuple, Type
from unittest import mock
import pytest
from langchain_core.embeddings import Embeddings
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class EmbeddingsTests(BaseStandardTests):
"""
:private:
"""
@property
@abstractmethod
def embeddings_class(self) -> Type[Embeddings]: ...
@property
def embedding_model_params(self) -> dict:
return {}
@pytest.fixture
def model(self) -> Embeddings:
return self.embeddings_class(**self.embedding_model_params)
class EmbeddingsUnitTests(EmbeddingsTests):
"""Base class for embeddings unit tests.
Test subclasses must implement the ``embeddings_class`` property to specify the
embeddings model to be tested. You can also override the
``embedding_model_params`` property to specify initialization parameters.
Example:
.. code-block:: python
from typing import Type
from langchain_tests.unit_tests import EmbeddingsUnitTests
from my_package.embeddings import MyEmbeddingsModel
class TestMyEmbeddingsModelUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[MyEmbeddingsModel]:
# Return the embeddings model class to test here
return MyEmbeddingsModel
@property
def embedding_model_params(self) -> dict:
# Return initialization parameters for the model.
return {"model": "model-001"}
.. note::
API references for individual test methods include troubleshooting tips.
Testing initialization from environment variables
Overriding the ``init_from_env_params`` property will enable additional tests
for initialization from environment variables. See below for details.
.. dropdown:: init_from_env_params
This property is used in unit tests to test initialization from
environment variables. It should return a tuple of three dictionaries
that specify the environment variables, additional initialization args,
and expected instance attributes to check.
Defaults to empty dicts. If not overridden, the test is skipped.
Example:
.. code-block:: python
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"MY_API_KEY": "api_key",
},
{
"model": "model-001",
},
{
"my_api_key": "api_key",
},
)
""" # noqa: E501
def test_init(self) -> None:
"""Test model initialization.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``embedding_model_params`` is specified
and the model can be initialized from those params.
"""
model = self.embeddings_class(**self.embedding_model_params)
assert model is not None
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
"""This property is used in unit tests to test initialization from environment
variables. It should return a tuple of three dictionaries that specify the
environment variables, additional initialization args, and expected instance
attributes to check."""
return {}, {}, {}
def test_init_from_env(self) -> None:
"""Test initialization from environment variables. Relies on the
``init_from_env_params`` property. Test is skipped if that property is not
set.
.. dropdown:: Troubleshooting
If this test fails, ensure that ``init_from_env_params`` is specified
correctly and that model parameters are properly set from environment
variables during initialization.
"""
env_params, embeddings_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
model = self.embeddings_class(**embeddings_params)
assert model is not None
for k, expected in expected_attrs.items():
actual = getattr(model, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
|
_base_ = [
'./faster_rcnn_r50_fpn.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='Tracktor',
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth', # noqa: E501
reid= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot17-4bf6b63d.pth' # noqa: E501
),
detector=dict(
rpn_head=dict(bbox_coder=dict(clip_border=False)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(
clip_border=False), num_classes=1))),
reid=dict(
type='BaseReID',
backbone=dict(
type='ResNet',
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=16,
fc_channels=32,
out_channels=16,
num_classes=8,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_pairwise=dict(
type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
motion=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='TracktorTracker',
obj_score_thr=0.5,
regression=dict(
obj_score_thr=0.5,
nms=dict(type='nms', iou_threshold=0.6),
match_iou_thr=0.3),
reid=dict(
num_samples=10,
img_scale=(256, 128),
img_norm_cfg=None,
match_score_thr=2.0,
match_iou_thr=0.2),
momentums=None,
num_frames_retain=10))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=1.0 / 100,
step=[3])
# runtime settings
total_epochs = 4
evaluation = dict(metric=['bbox', 'track'], interval=1)
search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML']
|
_base_ = [
'./faster_rcnn_r50_fpn.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='Tracktor',
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth', # noqa: E501
reid= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot17-4bf6b63d.pth' # noqa: E501
),
detector=dict(
rpn_head=dict(bbox_coder=dict(clip_border=False)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(
clip_border=False), num_classes=1))),
reid=dict(
type='BaseReID',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=378,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_pairwise=dict(
type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
motion=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='TracktorTracker',
obj_score_thr=0.5,
regression=dict(
obj_score_thr=0.5,
nms=dict(type='nms', iou_threshold=0.6),
match_iou_thr=0.3),
reid=dict(
num_samples=10,
img_scale=(256, 128),
img_norm_cfg=None,
match_score_thr=2.0,
match_iou_thr=0.2),
momentums=None,
num_frames_retain=10))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=1.0 / 100,
step=[3])
# runtime settings
total_epochs = 4
evaluation = dict(metric=['bbox', 'track'], interval=1)
search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML']
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray import BaseDoc
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(BaseDoc):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
|
from docarray import BaseDoc
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(BaseDoc):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Dict
import hnswlib
import numpy as np
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class HnswlibSearcher(Executor):
"""Hnswlib powered vector indexer
For more information about the Hnswlib supported parameters, please consult:
- https://github.com/nmslib/hnswlib
.. note::
        The Hnswlib package is only required at query time.
"""
def __init__(
self,
default_top_k: int = 10,
metric: str = 'cosine',
dump_path: Optional[str] = None,
default_traversal_paths: Optional[List[str]] = None,
is_distance: bool = False,
ef_construction: int = 400,
ef_query: int = 50,
max_connection: int = 64,
*args,
**kwargs,
):
"""
Initialize an HnswlibSearcher
        :param default_top_k: the default number of top-k results to retrieve
        :param metric: the distance metric, one of 'l2', 'ip', or 'cosine'
        :param dump_path: the path from which to load ids and vectors
        :param default_traversal_paths: traversal paths on docs, e.g. ['r'], ['c']
        :param is_distance: if True, return the raw distance as the match score; otherwise convert the distance to a similarity score
        :param ef_construction: defines a construction time/accuracy trade-off
        :param ef_query: sets the query time accuracy/speed trade-off
        :param max_connection: the maximum number of outgoing connections in the graph
:param args:
:param kwargs:
"""
super().__init__(*args, **kwargs)
self.default_top_k = default_top_k
self.metric = metric
self.default_traversal_paths = default_traversal_paths or ['r']
self.is_distance = is_distance
self.ef_construction = ef_construction
self.ef_query = ef_query
self.max_connection = max_connection
self.logger = get_logger(self)
dump_path = dump_path or kwargs.get('runtime_args', {}).get('dump_path', None)
if dump_path is not None:
self.logger.info('Start building "HnswlibSearcher" from dump data')
ids, vecs = import_vectors(dump_path, str(self.runtime_args.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = hnswlib.Index(space=self.metric, dim=num_dim)
self._indexer.init_index(max_elements=len(self._vecs), ef_construction=self.ef_construction,
M=self.max_connection)
self._doc_id_to_offset = {}
self._load_index(self._ids, self._vecs)
else:
self.logger.warning(
'No data loaded in "HnswlibSearcher". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
for idx, v in enumerate(vecs):
self._indexer.add_items(v.astype(np.float32), idx)
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.set_ef(self.ef_query)
@requests(on='/search')
def search(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
if docs is None:
return
if not hasattr(self, '_indexer'):
self.logger.warning('Querying against an empty index')
return
top_k = parameters.get('top_k', self.default_top_k)
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.knn_query(doc.embedding, k=top_k)
for idx, dist in zip(indices[0], dists[0]):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
if self.is_distance:
match.scores[self.metric] = dist
else:
if self.metric == 'cosine' or self.metric == 'ip':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on='/fill_embedding')
def fill_embedding(self, docs: Optional[DocumentArray], **kwargs):
if docs is None:
return
for doc in docs:
doc.embedding = np.array(
self._indexer.get_items([int(self._doc_id_to_offset[str(doc.id)])])[0]
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Dict
import hnswlib
import numpy as np
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class HnswlibSearcher(Executor):
"""Hnswlib powered vector indexer
For more information about the Hnswlib supported parameters, please consult:
- https://github.com/nmslib/hnswlib
.. note::
        The Hnswlib package is only required at query time.
"""
def __init__(
self,
default_top_k: int = 10,
metric: str = 'cosine',
dump_path: Optional[str] = None,
default_traversal_paths: Optional[List[str]] = None,
is_distance: bool = False,
ef_construction: int = 400,
ef_query: int = 50,
max_connection: int = 64,
*args,
**kwargs,
):
"""
Initialize an HnswlibSearcher
        :param default_top_k: the default number of top-k results to retrieve
        :param metric: the distance metric, one of 'l2', 'ip', or 'cosine'
        :param dump_path: the path from which to load ids and vectors
        :param default_traversal_paths: traversal paths on docs, e.g. ['r'], ['c']
        :param is_distance: if True, return the raw distance as the match score; otherwise convert the distance to a similarity score
        :param ef_construction: defines a construction time/accuracy trade-off
        :param ef_query: sets the query time accuracy/speed trade-off
        :param max_connection: the maximum number of outgoing connections in the graph
:param args:
:param kwargs:
"""
super().__init__(*args, **kwargs)
self.default_top_k = default_top_k
self.metric = metric
self.default_traversal_paths = default_traversal_paths or ['r']
self.is_distance = is_distance
self.ef_construction = ef_construction
self.ef_query = ef_query
self.max_connection = max_connection
self.logger = get_logger(self)
dump_path = dump_path or kwargs.get('runtime_args', {}).get('dump_path', None)
if dump_path is not None:
self.logger.info('Start building "HnswlibSearcher" from dump data')
ids, vecs = import_vectors(dump_path, str(self.metas.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = hnswlib.Index(space=self.metric, dim=num_dim)
self._indexer.init_index(max_elements=len(self._vecs), ef_construction=self.ef_construction,
M=self.max_connection)
self._doc_id_to_offset = {}
self._load_index(self._ids, self._vecs)
else:
self.logger.warning(
'No data loaded in "HnswlibSearcher". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
for idx, v in enumerate(vecs):
self._indexer.add_items(v.astype(np.float32), idx)
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.set_ef(self.ef_query)
@requests(on='/search')
def search(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
if docs is None:
return
if not hasattr(self, '_indexer'):
self.logger.warning('Querying against an empty index')
return
top_k = parameters.get('top_k', self.default_top_k)
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.knn_query(doc.embedding, k=top_k)
for idx, dist in zip(indices[0], dists[0]):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
if self.is_distance:
match.scores[self.metric] = dist
else:
if self.metric == 'cosine' or self.metric == 'ip':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on='/fill_embedding')
def fill_embedding(self, docs: Optional[DocumentArray], **kwargs):
if docs is None:
return
for doc in docs:
doc.embedding = np.array(
self._indexer.get_items([int(self._doc_id_to_offset[str(doc.id)])])[0]
)
|
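# Standalone sketch of the distance-to-score conversion used in
# HnswlibSearcher.search above; the helper name is ours, not part of the executor.
def distance_to_score(dist: float, metric: str, is_distance: bool) -> float:
    if is_distance:
        return dist  # report the raw distance as the score
    if metric in ('cosine', 'ip'):
        return 1 - dist  # invert the distance into a similarity
    return 1 / (1 + dist)  # l2: squash an unbounded distance into (0, 1]

assert distance_to_score(0.2, 'cosine', False) == 0.8
assert distance_to_score(4.0, 'l2', False) == 0.2
|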
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric=['bbox', 'segm'],
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_instance/test')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
def embed_inputs(
self,
model: SentenceTransformer,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]:
"""
        Call the encode method of the model.
Args:
model (SentenceTransformer): Model we are evaluating
sentences (str | list[str] | np.ndarray): Text that we are embedding
Returns:
list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associated embedding
"""
return model.encode(sentences, **kwargs)
|
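# Hedged sketch of a custom evaluator, as invited by the base class docstring
# ("Extend this class and implement __call__"); the class and its "dummy_score"
# metric are invented for illustration.
class DummyMeanEvaluator(SentenceEvaluator):
    def __init__(self, sentences: list, name: str = ""):
        super().__init__()
        self.sentences = sentences
        self.name = name
        self.primary_metric = "dummy_score"  # key used for model selection/logging

    def __call__(self, model, output_path=None, epoch=-1, steps=-1):
        embeddings = model.encode(self.sentences)
        metrics = {"dummy_score": float(embeddings.mean())}  # placeholder metric
        # prefix_name_to_metrics also prefixes primary_metric with the name
        return self.prefix_name_to_metrics(metrics, self.name)
|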
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
|
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: The dropout probability applied to the sentence embedding.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class Dropout(nn.Module):
"""Dropout layer.
    :param dropout: The dropout probability applied to the sentence embedding.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
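# Hedged sketch of plugging the Dropout module above into a SentenceTransformer
# pipeline; the base model name is illustrative.
from sentence_transformers import SentenceTransformer, models

word = models.Transformer("distilroberta-base")
pooling = models.Pooling(word.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word, pooling, Dropout(dropout=0.1)])
# Dropout is only active in training mode; model.encode() runs in eval mode.
|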
import subprocess
import sys
from unittest.mock import patch
import fastapi.cli
import pytest
def test_fastapi_cli():
result = subprocess.run(
[
sys.executable,
"-m",
"coverage",
"run",
"-m",
"fastapi",
"dev",
"non_existent_file.py",
],
capture_output=True,
encoding="utf-8",
)
assert result.returncode == 1, result.stdout
assert "Path does not exist non_existent_file.py" in result.stdout
def test_fastapi_cli_not_installed():
with patch.object(fastapi.cli, "cli_main", None):
with pytest.raises(RuntimeError) as exc_info:
fastapi.cli.main()
assert "To use the fastapi command, please install" in str(exc_info.value)
|
import subprocess
import sys
from unittest.mock import patch
import fastapi.cli
import pytest
def test_fastapi_cli():
result = subprocess.run(
[
sys.executable,
"-m",
"coverage",
"run",
"-m",
"fastapi",
"dev",
"non_existent_file.py",
],
capture_output=True,
encoding="utf-8",
)
assert result.returncode == 1, result.stdout
assert "Using path non_existent_file.py" in result.stdout
def test_fastapi_cli_not_installed():
with patch.object(fastapi.cli, "cli_main", None):
with pytest.raises(RuntimeError) as exc_info:
fastapi.cli.main()
assert "To use the fastapi command, please install" in str(exc_info.value)
|
"""
Computes embeddings
"""
import numpy as np
from sentence_transformers import SentenceTransformer
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
"""
Computes embeddings
"""
import numpy as np
from sentence_transformers import SentenceTransformer
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but we hope to see multilingual sparse models on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity Stats: Row Non-Zero Mean: 113.6150016784668, Row Sparsity Mean: 0.9962776005268097
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but we hope to see multilingual sparse models on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
build_detector, build_head, build_loss, build_neck,
build_roi_extractor, build_shared_head)
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
|
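# Sketch of the registry pattern behind the MODELS imports above, shown with a
# standalone mmengine Registry; ToyDetector is invented for illustration and is
# not an mmdet model.
from mmengine.registry import Registry

TOY_MODELS = Registry('toy_models')

@TOY_MODELS.register_module()
class ToyDetector:
    def __init__(self, depth: int = 50):
        self.depth = depth

# Configs reference registered classes by their 'type' key, just as detector
# configs elsewhere in this document do.
model = TOY_MODELS.build(dict(type='ToyDetector', depth=101))
assert model.depth == 101
|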
import numpy as np
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageBytes,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
@pytest.mark.proto
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
bytes_: bytes
img_bytes: ImageBytes
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
bytes_=b'hello',
img_bytes=b'img',
)
doc = doc.to_protobuf()
doc = MyDoc.from_protobuf(doc)
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
    assert (doc.embedding == np.zeros((3, 224, 224))).all()
assert doc.bytes_ == b'hello'
assert doc.img_bytes == b'img'
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageBytes,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
bytes_: bytes
img_bytes: ImageBytes
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
bytes_=b'hello',
img_bytes=b'img',
)
doc = doc.to_protobuf()
doc = MyDoc.from_protobuf(doc)
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
    assert (doc.embedding == np.zeros((3, 224, 224))).all()
assert doc.bytes_ == b'hello'
assert doc.img_bytes == b'img'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, BoxInstDataPreprocessor,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize', 'BoxInstDataPreprocessor'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, DetDataPreprocessor,
MultiBranchDataPreprocessor)
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize'
]
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'true')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'true')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
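# Hedged sketch of a test consuming the fixtures above; the test body is ours,
# not part of the conftest.
def test_port_generator_yields_distinct_ports(port_generator):
    first, second = port_generator(), port_generator()
    assert first != second  # the generator never repeats a port it handed out
|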
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py310, needs_pydanticv1
@pytest.fixture(
name="client",
params=[
"tutorial001_pv1",
pytest.param("tutorial001_pv1_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.schema_extra_example.{request.param}")
client = TestClient(mod.app)
return client
@needs_pydanticv1
def test_post_body_example(client: TestClient):
response = client.put(
"/items/5",
json={
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
)
assert response.status_code == 200
@needs_pydanticv1
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
# insert_assert(response.json())
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {"type": "integer", "title": "Item Id"},
"name": "item_id",
"in": "path",
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Item": {
"properties": {
"name": {"type": "string", "title": "Name"},
"description": {"type": "string", "title": "Description"},
"price": {"type": "number", "title": "Price"},
"tax": {"type": "number", "title": "Tax"},
},
"type": "object",
"required": ["name", "price"],
"title": "Item",
"examples": [
{
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
],
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
|
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_pydanticv1
@pytest.fixture(name="client")
def get_client():
from docs_src.schema_extra_example.tutorial001_pv1 import app
client = TestClient(app)
return client
@needs_pydanticv1
def test_post_body_example(client: TestClient):
response = client.put(
"/items/5",
json={
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
)
assert response.status_code == 200
@needs_pydanticv1
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
# insert_assert(response.json())
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {"type": "integer", "title": "Item Id"},
"name": "item_id",
"in": "path",
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Item": {
"properties": {
"name": {"type": "string", "title": "Name"},
"description": {"type": "string", "title": "Description"},
"price": {"type": "number", "title": "Price"},
"tax": {"type": "number", "title": "Tax"},
},
"type": "object",
"required": ["name", "price"],
"title": "Item",
"examples": [
{
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
],
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
|
import logging
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import Runnable
logger = logging.getLogger(__name__)
# Default template
DEFAULT_TEMPLATE = """You are an assistant tasked with taking a natural language \
query from a user and converting it into a query for a vectorstore. \
In this process, you strip out information that is not relevant for \
the retrieval task. Here is the user query: {question}"""
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE)
class RePhraseQueryRetriever(BaseRetriever):
"""Given a query, use an LLM to re-phrase it.
Then, retrieve docs for the re-phrased query."""
retriever: BaseRetriever
llm_chain: Runnable
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT,
) -> "RePhraseQueryRetriever":
"""Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever
"""
llm_chain = prompt | llm | StrOutputParser()
return cls(
retriever=retriever,
llm_chain=llm_chain,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get relevant documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question
"""
re_phrased_question = self.llm_chain.invoke(
query,
{"callbacks": run_manager.get_child()},
)
logger.info(f"Re-phrased question: {re_phrased_question}")
return self.retriever.invoke(
re_phrased_question,
config={"callbacks": run_manager.get_child()},
)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
raise NotImplementedError
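# Usage sketch (illustrative; `vectorstore` and `llm` are assumed to exist and
# are not defined in this module): `from_llm` builds the default
# prompt | llm | StrOutputParser() chain, and `invoke` retrieves with the
# re-phrased query.
#
# retriever = RePhraseQueryRetriever.from_llm(
#     retriever=vectorstore.as_retriever(),
#     llm=llm,
# )
# docs = retriever.invoke("Hi, I'm Lance. What are approaches to task decomposition?")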
|
import logging
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import Runnable
logger = logging.getLogger(__name__)
# Default template
DEFAULT_TEMPLATE = """You are an assistant tasked with taking a natural language \
query from a user and converting it into a query for a vectorstore. \
In this process, you strip out information that is not relevant for \
the retrieval task. Here is the user query: {question}"""
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE)
class RePhraseQueryRetriever(BaseRetriever):
"""Given a query, use an LLM to re-phrase it.
Then, retrieve docs for the re-phrased query."""
retriever: BaseRetriever
llm_chain: Runnable
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT,
) -> "RePhraseQueryRetriever":
"""Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever
"""
llm_chain = prompt | llm | StrOutputParser()
return cls(
retriever=retriever,
llm_chain=llm_chain,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get relevant documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question
"""
re_phrased_question = self.llm_chain.invoke(
query, {"callbacks": run_manager.get_child()}
)
logger.info(f"Re-phrased question: {re_phrased_question}")
return self.retriever.invoke(
re_phrased_question, config={"callbacks": run_manager.get_child()}
)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
raise NotImplementedError
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
# Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
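# Note on the neck channels above: with embed_dims=96 the four Swin-T stages
# emit 96, 192, 384 and 768 channels, so out_indices=(1, 2, 3) pairs with
# in_channels=[192, 384, 768]; num_outs=5 lets FPN append two extra
# downsampled levels for RetinaNet.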
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
# Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=False,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocVec
from docarray.array.any_array import AnyDocArray
class DocArraySummary:
def __init__(self, docs: 'AnyDocArray'):
self.docs = docs
def summary(self) -> None:
"""
Print a summary of this DocList object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocVec
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.docs.__class__.__name__)
table.add_row('Length', str(len(self.docs)), end_section=True)
if isinstance(self.docs, DocVec):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(docs=self.docs)
for field_name in stacked_fields:
val = self.docs
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocList Summary', expand=False))
self.docs.doc_type.schema_summary()
@staticmethod
    def _get_stacked_fields(docs: 'DocVec') -> List[str]:  # TODO: this might be broken
"""
Return a list of the field names of a DocVec instance that are
doc_vec, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in docs._storage.tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in docs._storage.doc_columns.items():
if value_doc is not None:
fields.extend(
[
f'{field_name}.{x}'
for x in DocArraySummary._get_stacked_fields(docs=value_doc)
]
)
return fields
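# Usage sketch (assumes docarray is installed; `MyDoc` is illustrative):
#
# from docarray import BaseDoc, DocList
#
# class MyDoc(BaseDoc):
#     text: str
#
# DocArraySummary(DocList[MyDoc]([MyDoc(text='hello')])).summary()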
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocVec
from docarray.array.any_array import AnyDocArray
class DocArraySummary:
def __init__(self, docs: 'AnyDocArray'):
self.docs = docs
def summary(self) -> None:
"""
Print a summary of this DocList object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocVec
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.docs.__class__.__name__)
table.add_row('Length', str(len(self.docs)), end_section=True)
if isinstance(self.docs, DocVec):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(docs=self.docs)
for field_name in stacked_fields:
val = self.docs
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocList Summary', expand=False))
self.docs.doc_type.schema_summary()
@staticmethod
    def _get_stacked_fields(docs: 'DocVec') -> List[str]:  # TODO: this might be broken
"""
Return a list of the field names of a DocVec instance that are
doc_vec, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in docs._storage.tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in docs._storage.doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocArraySummary._get_stacked_fields(docs=value_doc)
]
)
return fields
|
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"Agent": {
"include": {
**AGENT_GRAPH_INCLUDE,
"AgentGraphExecution": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
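# Usage sketch (hypothetical query; assumes Prisma Client Python's model-based
# access): pass one of the include mappings so nested relations are fetched in
# a single query.
#
# graph = await prisma.models.AgentGraph.prisma().find_unique(
#     where={"id": graph_id},
#     include=AGENT_GRAPH_INCLUDE,
# )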
|
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
|
from __future__ import annotations
from typing import Callable
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
from sentence_transformers.util import fullname, import_from_string
class Dense(Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
config_keys: list[str] = [
"in_features",
"out_features",
"bias",
"activation_function",
]
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
config["activation_function"] = import_from_string(config["activation_function"])()
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
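# Minimal usage sketch (illustrative, assumes torch and this module's imports
# are installed): project a 768-dim pooled sentence embedding down to 256 dims
# through the default Tanh activation.
if __name__ == "__main__":
    import torch

    dense = Dense(in_features=768, out_features=256)
    features = {"sentence_embedding": torch.randn(4, 768)}
    out = dense(features)["sentence_embedding"]
    print(out.shape)  # torch.Size([4, 256])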
|
from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['palette']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['palette']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['palette']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['palette']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``min_area``, the scale is 0.5 while the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
        ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
def jitter_color(color: tuple) -> tuple:
"""Randomly jitter the given color in order to better distinguish instances
with the same class.
Args:
color (tuple): The RGB color tuple. Each value is between [0, 255].
Returns:
tuple: The jittered color tuple.
"""
jitter = np.random.rand(3)
jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255
color = np.clip(jitter + color, 0, 255).astype(np.uint8)
return tuple(color)
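# Worked sketch of the adaptive-scale mapping above (values chosen to hit the
# boundaries; assumes the module's imports resolve): areas at or below
# ``min_area`` clip to 0.5, and areas at or beyond the midpoint of
# [min_area, max_area] already saturate at 1.0 because of the clip.
if __name__ == '__main__':
    areas = np.array([500, 800, 15400, 30000])
    print(_get_adaptive_scales(areas))  # [0.5 0.5 1.  1. ]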
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['PALETTE']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['PALETTE']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['PALETTE']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['PALETTE']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``min_area``, the scale is 0.5 while the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
        ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
def jitter_color(color: tuple) -> tuple:
"""Randomly jitter the given color in order to better distinguish instances
with the same class.
Args:
color (tuple): The RGB color tuple. Each value is between [0, 255].
Returns:
tuple: The jittered color tuple.
"""
jitter = np.random.rand(3)
jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255
color = np.clip(jitter + color, 0, 255).astype(np.uint8)
return tuple(color)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODEL_WRAPPERS,
MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS', 'MODEL_WRAPPERS', 'VISUALIZERS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODEL_WRAPPERS,
MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS', 'MODEL_WRAPPERS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
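# Config usage sketch (field values are illustrative, not a working config):
# once registered, the detector is built from a config dict such as
#
# model = dict(
#     type='GridRCNN',
#     backbone=dict(...),
#     rpn_head=dict(...),
#     roi_head=dict(...),
#     train_cfg=dict(...),
#     test_cfg=dict(...))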
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(GridRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/training_stsb_ct-improved-{}-{}".format(
model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
train_sentences.append(InputExample(texts=[line.strip(), line.strip()]))
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTensionLossInBatchNegatives a standard DataLoader over identical sentence pairs is sufficient
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model, scale=1, similarity_fct=util.dot_score)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
    epochs=epochs,
evaluation_steps=1000,
warmup_steps=1000,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
    use_amp=True,  # Set to True if your GPU supports FP16
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
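# Note on the objective above: every InputExample pairs a sentence with itself,
# so each batch element is its own positive while the remaining batch_size - 1
# (here 127) sentences in the batch act as in-batch negatives for the
# contrastive loss.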
|
import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
## Training parameters
model_name = 'distilbert-base-uncased'
batch_size = 128
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = 'output/training_stsb_ct-improved-{}-{}'.format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = 'data/wiki1m_for_simcse.txt'
if not os.path.exists(wikipedia_dataset_path):
util.http_get('https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt', wikipedia_dataset_path)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, 'r', encoding='utf8') as fIn:
for line in fIn:
train_sentences.append(InputExample(texts=[line.strip(), line.strip()]))
################# Download and load STSb #################
data_folder = 'data/stsbenchmark'
sts_dataset_path = f'{data_folder}/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTensionLossInBatchNegatives a standard DataLoader over identical sentence pairs is sufficient
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model, scale=1, similarity_fct=util.dot_score)
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
          epochs=epochs,
evaluation_steps=1000,
warmup_steps=1000,
output_path=model_save_path,
optimizer_params={'lr': 5e-5},
          use_amp=True  # Set to True if your GPU supports FP16
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
import os
import shutil
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
os.system('scripts/download_full.sh')
yield
shutil.rmtree('.cache', ignore_errors=True)
|
import os
import shutil
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
os.system('scripts/download_full.sh')
yield
shutil.rmtree('.cache')
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"])
class MaxPooling3D(BasePooling):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the maximum value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.MaxPooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size=(2, 2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
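# Shape note for the docstring example (derived, not stated there): with
# padding="valid" each spatial dim becomes floor((dim - pool_size) / stride) + 1,
# and strides default to pool_size, so floor((30 - 3) / 3) + 1 = 10 yields an
# output of shape (batch_size, 10, 10, 10, 3).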
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"])
class MaxPooling3D(BasePooling):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the maximum value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.MaxPooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size=(2, 2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
_id = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(file, "r") as f:
lines = f.readlines()
class_regex = f"class {class_name}("
test_regex = f"{4 * ' '}def {test_name}("
line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
in_class = False
in_func = False
in_line = False
insert_line = False
count = 0
spaces = 0
new_lines = []
for line in lines:
if line.startswith(class_regex):
in_class = True
elif in_class and line.startswith(test_regex):
in_func = True
elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
spaces = len(line.split(correct_line.split()[0])[0])
count += 1
if count == done_test[_id]:
in_line = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
insert_line = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}")
in_class = in_func = in_line = insert_line = False
else:
new_lines.append(line)
with open(file, "w") as f:
for line in new_lines:
f.write(line)
def main(correct, fail=None):
if fail is not None:
with open(fail, "r") as f:
test_failures = {l.strip() for l in f.readlines()}
else:
test_failures = None
with open(correct, "r") as f:
correct_lines = f.readlines()
done_tests = defaultdict(int)
for line in correct_lines:
file, class_name, test_name, correct_line = line.split("::")
if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
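# Invocation sketch (file names are illustrative): each line of the
# corrections file is "path::TestClass::test_name::corrected source line".
#
#   python overwrite_expected.py \
#       --correct_filename corrections.txt \
#       --fail_filename failures.txt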
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
_id = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(file, "r") as f:
lines = f.readlines()
class_regex = f"class {class_name}("
test_regex = f"{4 * ' '}def {test_name}("
line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
in_class = False
in_func = False
in_line = False
insert_line = False
count = 0
spaces = 0
new_lines = []
for line in lines:
if line.startswith(class_regex):
in_class = True
elif in_class and line.startswith(test_regex):
in_func = True
elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
spaces = len(line.split(correct_line.split()[0])[0])
count += 1
if count == done_test[_id]:
in_line = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
insert_line = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}")
in_class = in_func = in_line = insert_line = False
else:
new_lines.append(line)
with open(file, "w") as f:
for line in new_lines:
f.write(line)
def main(correct, fail=None):
if fail is not None:
with open(fail, "r") as f:
test_failures = {l.strip() for l in f.readlines()}
else:
test_failures = None
with open(correct, "r") as f:
correct_lines = f.readlines()
done_tests = defaultdict(int)
for line in correct_lines:
file, class_name, test_name, correct_line = line.split("::")
if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
|
from __future__ import annotations
from typing import Any, List
from langchain_text_splitters.base import TextSplitter
class NLTKTextSplitter(TextSplitter):
"""Splitting text using NLTK package."""
def __init__(
self,
separator: str = "\n\n",
language: str = "english",
*,
use_span_tokenize: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
self._separator = separator
self._language = language
self._use_span_tokenize = use_span_tokenize
if self._use_span_tokenize and self._separator != "":
raise ValueError("When use_span_tokenize is True, separator should be ''")
try:
import nltk
if self._use_span_tokenize:
self._tokenizer = nltk.tokenize._get_punkt_tokenizer(self._language)
else:
self._tokenizer = nltk.tokenize.sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._use_span_tokenize:
spans = list(self._tokenizer.span_tokenize(text))
splits = []
for i, (start, end) in enumerate(spans):
if i > 0:
prev_end = spans[i - 1][1]
sentence = text[prev_end:start] + text[start:end]
else:
sentence = text[start:end]
splits.append(sentence)
else:
splits = self._tokenizer(text, language=self._language)
return self._merge_splits(splits, self._separator)
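# Usage sketch (assumes nltk and its punkt sentence model are installed; the
# chunk sizes are illustrative): with use_span_tokenize=True the separator must
# stay "", and the whitespace between sentences is preserved in the chunks.
#
# splitter = NLTKTextSplitter(separator="", use_span_tokenize=True,
#                             chunk_size=100, chunk_overlap=0)
# chunks = splitter.split_text("First sentence. Second sentence.")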
|
from __future__ import annotations
from typing import Any, List
from langchain_text_splitters.base import TextSplitter
class NLTKTextSplitter(TextSplitter):
"""Splitting text using NLTK package."""
def __init__(
self,
separator: str = "\n\n",
language: str = "english",
*,
use_span_tokenize: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
self._separator = separator
self._language = language
self._use_span_tokenize = use_span_tokenize
if self._use_span_tokenize and self._separator != "":
raise ValueError("When use_span_tokenize is True, separator should be ''")
try:
if self._use_span_tokenize:
from nltk.tokenize import _get_punkt_tokenizer
self._tokenizer = _get_punkt_tokenizer(self._language)
else:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._use_span_tokenize:
spans = list(self._tokenizer.span_tokenize(text))
splits = []
for i, (start, end) in enumerate(spans):
if i > 0:
prev_end = spans[i - 1][1]
sentence = text[prev_end:start] + text[start:end]
else:
sentence = text[start:end]
splits.append(sentence)
else:
splits = self._tokenizer(text, language=self._language)
return self._merge_splits(splits, self._separator)
|
import io
import logging
from enum import Enum
import replicate
import replicate.exceptions
import requests
from prisma.models import AgentGraph
from replicate.helpers import FileOutput
from backend.data.graph import Graph
from backend.util.settings import Settings
logger = logging.getLogger(__name__)
class ImageSize(str, Enum):
LANDSCAPE = "1024x768"
class ImageStyle(str, Enum):
DIGITAL_ART = "digital art"
async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO:
"""
    Generate an image for an agent using the Flux model via the Replicate API.
Args:
agent (Graph): The agent to generate an image for
Returns:
io.BytesIO: The generated image as bytes
"""
try:
settings = Settings()
if not settings.secrets.replicate_api_key:
raise ValueError("Missing Replicate API key in settings")
# Construct prompt from agent details
prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
# Set up Replicate client
client = replicate.Client(api_token=settings.secrets.replicate_api_key)
# Model parameters
input_data = {
"prompt": prompt,
"width": 1024,
"height": 768,
"aspect_ratio": "4:3",
"output_format": "jpg",
"output_quality": 90,
"num_inference_steps": 30,
"guidance": 3.5,
"negative_prompt": "blurry, low quality, distorted, deformed",
"disable_safety_checker": True,
}
try:
# Run model
output = client.run("black-forest-labs/flux-1.1-pro", input=input_data)
# Depending on the model output, extract the image URL or bytes
# If the output is a list of FileOutput or URLs
if isinstance(output, list) and output:
if isinstance(output[0], FileOutput):
image_bytes = output[0].read()
else:
# If it's a URL string, fetch the image bytes
result_url = output[0]
response = requests.get(result_url)
response.raise_for_status()
image_bytes = response.content
elif isinstance(output, FileOutput):
image_bytes = output.read()
elif isinstance(output, str):
# Output is a URL
response = requests.get(output)
response.raise_for_status()
image_bytes = response.content
else:
raise RuntimeError("Unexpected output format from the model.")
return io.BytesIO(image_bytes)
except replicate.exceptions.ReplicateError as e:
if e.status == 401:
raise RuntimeError("Invalid Replicate API token") from e
raise RuntimeError(f"Replicate API error: {str(e)}") from e
except Exception as e:
logger.exception("Failed to generate agent image")
raise RuntimeError(f"Image generation failed: {str(e)}")
|
import io
import logging
from enum import Enum
import replicate
import replicate.exceptions
import requests
from replicate.helpers import FileOutput
from backend.data.graph import Graph
from backend.util.settings import Settings
logger = logging.getLogger(__name__)
class ImageSize(str, Enum):
LANDSCAPE = "1024x768"
class ImageStyle(str, Enum):
DIGITAL_ART = "digital art"
async def generate_agent_image(agent: Graph) -> io.BytesIO:
"""
    Generate an image for an agent using the Flux model via the Replicate API.
Args:
agent (Graph): The agent to generate an image for
Returns:
io.BytesIO: The generated image as bytes
"""
try:
settings = Settings()
if not settings.secrets.replicate_api_key:
raise ValueError("Missing Replicate API key in settings")
# Construct prompt from agent details
prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
# Set up Replicate client
client = replicate.Client(api_token=settings.secrets.replicate_api_key)
# Model parameters
input_data = {
"prompt": prompt,
"width": 1024,
"height": 768,
"aspect_ratio": "4:3",
"output_format": "jpg",
"output_quality": 90,
"num_inference_steps": 30,
"guidance": 3.5,
"negative_prompt": "blurry, low quality, distorted, deformed",
"disable_safety_checker": True,
}
try:
# Run model
output = client.run("black-forest-labs/flux-1.1-pro", input=input_data)
# Depending on the model output, extract the image URL or bytes
# If the output is a list of FileOutput or URLs
if isinstance(output, list) and output:
if isinstance(output[0], FileOutput):
image_bytes = output[0].read()
else:
# If it's a URL string, fetch the image bytes
result_url = output[0]
response = requests.get(result_url)
response.raise_for_status()
image_bytes = response.content
elif isinstance(output, FileOutput):
image_bytes = output.read()
elif isinstance(output, str):
# Output is a URL
response = requests.get(output)
response.raise_for_status()
image_bytes = response.content
else:
raise RuntimeError("Unexpected output format from the model.")
return io.BytesIO(image_bytes)
except replicate.exceptions.ReplicateError as e:
if e.status == 401:
raise RuntimeError("Invalid Replicate API token") from e
raise RuntimeError(f"Replicate API error: {str(e)}") from e
except Exception as e:
logger.exception("Failed to generate agent image")
raise RuntimeError(f"Image generation failed: {str(e)}")
|
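# --- Editor's sketch (not part of the dataset rows above) --------------------
# generate_agent_image normalizes three possible shapes of Replicate output:
# a list of FileOutput objects (or URL strings), a single FileOutput, or a
# bare URL string. Below is a minimal, dependency-free re-enactment of that
# branching; `FakeFileOutput` is a hypothetical stand-in for
# replicate.helpers.FileOutput so the sketch runs without a token or network.
import io
class FakeFileOutput:
    def __init__(self, data: bytes):
        self._data = data
    def read(self) -> bytes:
        return self._data
def normalize_output(output) -> io.BytesIO:
    # Collapse list outputs to their first element, as the original does.
    if isinstance(output, list) and output:
        output = output[0]
    if isinstance(output, FakeFileOutput):
        return io.BytesIO(output.read())
    if isinstance(output, str):
        # The original fetches this URL with requests.get(...).content;
        # elided here to keep the sketch offline.
        raise NotImplementedError("URL fetching elided in this sketch")
    raise RuntimeError("Unexpected output format from the model.")
assert normalize_output([FakeFileOutput(b"jpg")]).read() == b"jpg"
assert normalize_output(FakeFileOutput(b"jpg")).read() == b"jpg"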
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Callable
import pytest
from jina import Flow
from ...audioclip_text import AudioCLIPTextEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on="/index",
inputs=data_generator(),
request_size=request_size,
return_results=True,
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable
import pytest
from jina import Flow
from ...audioclip_text import AudioCLIPTextEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on="/index",
inputs=data_generator(),
request_size=request_size,
return_results=True,
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
|
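# --- Editor's sketch (not part of the dataset rows above) --------------------
# Both tests above consume a `data_generator` pytest fixture defined in the
# suite's conftest.py, which is not shown here. A hypothetical equivalent:
# the assertion `min(len(resp) * request_size, 50) == 50` implies the fixture
# returns a callable that yields at least 50 text Documents.
import pytest
from jina import Document
@pytest.fixture
def data_generator():
    def _generator(num_docs: int = 50):
        for i in range(num_docs):
            yield Document(text=f"sentence {i}")
    return _generator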
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
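# --- Editor's sketch (not part of the dataset rows above) --------------------
# The two configs above differ only in the `_base_` filename (the 2.x-style
# name was renamed for 3.x). `_base_` inheritance merges the child config into
# the base recursively, and `{{_base_.file_client_args}}` interpolates a
# variable defined in the base. A toy dict-level re-enactment of the merge
# semantics (the real implementation is mmengine.Config and operates on files):
def merge(base: dict, child: dict) -> dict:
    out = dict(base)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)  # recurse into nested dicts
        else:
            out[key] = value                   # child value overrides base
    return out
base = {'model': {'data_preprocessor': {'bgr_to_rgb': True, 'std': [58.395, 57.12, 57.375]}}}
child = {'model': {'data_preprocessor': {'bgr_to_rgb': False, 'std': [57.375, 57.12, 58.395]}}}
assert merge(base, child)['model']['data_preprocessor']['bgr_to_rgb'] is False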
from typing import Type
from docarray.proto import DocumentArrayProto, NodeProto
from ..abstract_array import AbstractDocumentArray
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
return cls(cls.document_type.from_protobuf(od) for od in pb_msg.docs)
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:return: the protobuf message
"""
dap = DocumentArrayProto()
for doc in self:
dap.docs.append(doc.to_protobuf())
return dap
def _to_node_protobuf(self) -> NodeProto:
"""Convert a DocumentArray into a NodeProto protobuf message.
This function should be called when a DocumentArray
is nested into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(chunks=self.to_protobuf())
|
from typing import Type
from docarray.proto import DocumentArrayProto, NodeProto
from ..abstract_array import AbstractDocumentArray
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
return cls(cls.document_type.from_protobuf(od) for od in pb_msg.docs)
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:return: the protobuf message
"""
dap = DocumentArrayProto()
for doc in self:
dap.docs.append(doc.to_protobuf())
return dap
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert a DocumentArray into a nested item protobuf message.
This function should be called when a DocumentArray
is nested into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(chunks=self.to_protobuf())
|
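# --- Editor's sketch (not part of the dataset rows above) --------------------
# The mixin above (both variants) serializes a DocumentArray by emitting one
# protobuf entry per document and rebuilds it element-wise; the two files
# differ only in the name of the nesting helper (_to_node_protobuf vs
# _to_nested_item_protobuf). A dependency-free toy round trip showing the same
# pattern; ToyDoc/ToyProto/ToyArray are hypothetical stand-ins:
from dataclasses import dataclass, field
from typing import List
@dataclass
class ToyDoc:
    text: str
    def to_protobuf(self) -> dict:
        return {"text": self.text}
    @classmethod
    def from_protobuf(cls, pb: dict) -> "ToyDoc":
        return cls(**pb)
@dataclass
class ToyProto:
    docs: List[dict] = field(default_factory=list)
class ToyArray(list):
    document_type = ToyDoc
    @classmethod
    def from_protobuf(cls, pb: ToyProto) -> "ToyArray":
        return cls(cls.document_type.from_protobuf(od) for od in pb.docs)
    def to_protobuf(self) -> ToyProto:
        pb = ToyProto()
        for doc in self:
            pb.docs.append(doc.to_protobuf())
        return pb
da = ToyArray([ToyDoc("hello"), ToyDoc("world")])
assert ToyArray.from_protobuf(da.to_protobuf()) == da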
"""Init file of LlamaIndex."""
__version__ = "0.12.36"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.35"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS; setting it here seems to have no effect, so it must also be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    many parallel plot generators vs. the Ubuntu default of ulimit -n 1024 or the
    OS X El Capitan default of 256; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS; setting it here seems to have no effect, so it must also be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    many parallel plot generators vs. the Ubuntu default of ulimit -n 1024 or the
    OS X El Capitan default of 256; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
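# --- Editor's sketch (not part of the dataset rows above) --------------------
# _set_nofile above bumps the RLIMIT_NOFILE soft limit so many files/sockets
# can be open at once. The essential dance, runnable on POSIX (the resource
# module does not exist on Windows, hence the guarded import in the original):
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
new_soft = max(soft, 4096)
if hard != resource.RLIM_INFINITY:
    new_soft = min(new_soft, hard)  # the soft limit may never exceed the hard limit
resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
print(f"nofile soft limit: {soft} -> {new_soft} (hard: {hard})")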
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
import json
import os
from typing import List
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
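# --- Editor's sketch (not part of the dataset rows above) --------------------
# A toy forward pass for the LSTM module above (either variant), assuming the
# class is in scope. It packs the padded token embeddings by sentence length,
# runs the bidirectional LSTM, and re-pads, so the output keeps the input's
# (batch, max_len) layout with 2 * hidden_dim features. Shapes are arbitrary.
import torch
lstm = LSTM(word_embedding_dimension=16, hidden_dim=8)  # bidirectional by default
features = {
    "token_embeddings": torch.randn(4, 10, 16),         # (batch, max_len, emb_dim)
    "sentence_lengths": torch.tensor([10, 7, 5, 3]),
}
out = lstm(features)["token_embeddings"]
assert out.shape == (4, 10, 16)                         # 2 * hidden_dim = 16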
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adagrad"])
class Adagrad(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adagrad`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use `1.0`.
initial_accumulator_value: Floating point value. Starting value for the
            accumulators (the per-parameter sums of squared gradients). Must be non-negative.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Duchi et al., 2011](
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
"""
def __init__(
self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adagrad",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
initializer = initializers.Constant(self.initial_accumulator_value)
self._accumulators = self.add_optimizer_variables(
var_list, "accumulator", initializer=initializer
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
accumulator = self._accumulators[self._get_variable_index(variable)]
self.assign_add(accumulator, ops.square(gradient))
self.assign_sub(
variable,
ops.divide(
ops.multiply(lr, gradient),
ops.sqrt(ops.add(accumulator, self.epsilon)),
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"initial_accumulator_value": self.initial_accumulator_value,
"epsilon": self.epsilon,
}
)
return config
Adagrad.__doc__ = Adagrad.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adagrad"])
class Adagrad(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adagrad`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use `1.0`.
initial_accumulator_value: Floating point value. Starting value for the
            accumulators (the per-parameter sums of squared gradients). Must be non-negative.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Duchi et al., 2011](
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
"""
def __init__(
self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adagrad",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulators = []
initializer = initializers.Constant(self.initial_accumulator_value)
for var in var_list:
self._accumulators.append(
self.add_variable(
shape=var.shape,
initializer=initializer,
dtype=var.dtype,
name="accumulator",
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
accumulator = self._accumulators[self._get_variable_index(variable)]
self.assign_add(accumulator, ops.square(gradient))
self.assign_sub(
variable,
ops.divide(
ops.multiply(lr, gradient),
ops.sqrt(ops.add(accumulator, self.epsilon)),
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"initial_accumulator_value": self.initial_accumulator_value,
"epsilon": self.epsilon,
}
)
return config
Adagrad.__doc__ = Adagrad.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
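# --- Editor's sketch (not part of the dataset rows above) --------------------
# The numeric core of update_step above, written out with NumPy so the
# per-parameter learning-rate adaptation is visible: the accumulator collects
# squared gradients, so frequently-updated parameters take ever-smaller steps.
# Update rule: acc += g ** 2, then w -= lr * g / sqrt(acc + eps).
import numpy as np
lr, eps = 0.1, 1e-7
w = np.array([1.0, 1.0])
acc = np.full_like(w, 0.1)  # initial_accumulator_value
for g in (np.array([1.0, 0.0]), np.array([1.0, 0.0])):
    acc += g ** 2
    w -= lr * g / np.sqrt(acc + eps)
print(w)  # the first coordinate moves less on the second step; the second never moves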
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import Hook
from torch import nn
from mmdet.registry import HOOKS
from ..utils.dist_utils import all_reduce_dict
def get_norm_states(module):
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states after training epoch, currently used in YOLOX.
Args:
num_last_epochs (int): The number of latter epochs in the end of the
training to switch to synchronizing norm interval. Default: 15.
interval (int): Synchronizing norm interval. Default: 1.
"""
def __init__(self, num_last_epochs=15, interval=1):
self.interval = interval
self.num_last_epochs = num_last_epochs
def before_train_epoch(self, runner):
epoch = runner.epoch
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
# Synchronize norm every epoch.
self.interval = 1
def after_train_epoch(self, runner):
"""Synchronizing norm."""
epoch = runner.epoch
module = runner.model
if (epoch + 1) % self.interval == 0:
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
if len(norm_states) == 0:
return
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import nn
from ..utils.dist_utils import all_reduce_dict
def get_norm_states(module):
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states after training epoch, currently used in YOLOX.
Args:
num_last_epochs (int): The number of latter epochs in the end of the
training to switch to synchronizing norm interval. Default: 15.
interval (int): Synchronizing norm interval. Default: 1.
"""
def __init__(self, num_last_epochs=15, interval=1):
self.interval = interval
self.num_last_epochs = num_last_epochs
def before_train_epoch(self, runner):
epoch = runner.epoch
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
# Synchronize norm every epoch.
self.interval = 1
def after_train_epoch(self, runner):
"""Synchronizing norm."""
epoch = runner.epoch
module = runner.model
if (epoch + 1) % self.interval == 0:
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
if len(norm_states) == 0:
return
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
|
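# --- Editor's sketch (not part of the dataset rows above) --------------------
# What get_norm_states above collects, shown on a toy module (the function is
# assumed to be in scope): every parameter and buffer of every _NormBase
# child, keyed by dotted module path. In a distributed run these tensors are
# then averaged across ranks with all_reduce_dict(op='mean') and loaded back
# via load_state_dict(..., strict=False).
from torch import nn
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
states = get_norm_states(model)
print(list(states.keys()))
# ['1.weight', '1.bias', '1.running_mean', '1.running_var', '1.num_batches_tracked']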
from typing import Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.image.image_jax_array import ImageJaxArray
from docarray.typing.tensor.jaxarray import JaxArray
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
T = TypeVar("T", bound="ImageTensor")
class ImageTensor(AnyTensor, AbstractImageTensor):
"""
    Represents an image tensor object that can be used with TensorFlow, PyTorch, and NumPy tensor types.
---
'''python
from docarray import BaseDoc
from docarray.typing import ImageTensor
class MyImageDoc(BaseDoc):
image: ImageTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyImageDoc(image=tf.zeros((1000, 2)))
type(doc.image) # ImageTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyImageDoc(image=torch.zeros((1000, 2)))
type(doc.image) # ImageTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyImageDoc(image=np.zeros((1000, 2)))
type(doc.image) # ImageNdArray
'''
---
Returns:
Union[ImageTorchTensor, ImageTensorFlowTensor, ImageNdArray]: The validated and converted image tensor.
Raises:
TypeError: If the input type is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray].
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(ImageTorchTensor, value)
elif isinstance(value, torch.Tensor):
return ImageTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(ImageTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return ImageTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(ImageJaxArray, value)
elif isinstance(value, jnp.ndarray):
return ImageJaxArray._docarray_from_native(value) # noqa
try:
return ImageNdArray._docarray_validate(value)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.image.image_jax_array import ImageJaxArray
from docarray.typing.tensor.jaxarray import JaxArray
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="ImageTensor")
class ImageTensor(AnyTensor, AbstractImageTensor):
"""
    Represents an image tensor object that can be used with TensorFlow, PyTorch, and NumPy tensor types.
---
'''python
from docarray import BaseDoc
from docarray.typing import ImageTensor
class MyImageDoc(BaseDoc):
image: ImageTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyImageDoc(image=tf.zeros((1000, 2)))
type(doc.image) # ImageTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyImageDoc(image=torch.zeros((1000, 2)))
type(doc.image) # ImageTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyImageDoc(image=np.zeros((1000, 2)))
type(doc.image) # ImageNdArray
'''
---
Returns:
Union[ImageTorchTensor, ImageTensorFlowTensor, ImageNdArray]: The validated and converted image tensor.
Raises:
TypeError: If the input type is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray].
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(ImageTorchTensor, value)
elif isinstance(value, torch.Tensor):
return ImageTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(ImageTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return ImageTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(ImageJaxArray, value)
elif isinstance(value, jnp.ndarray):
return ImageJaxArray._docarray_from_native(value) # noqa
try:
return ImageNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
"""
This script trains a masked language model (MLM). You provide a training file; each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
import gzip
import sys
from datetime import datetime
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
Trainer,
TrainingArguments,
)
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False  # Set to True if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
"""
This script trains a masked language model (MLM). You provide a training file; each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
from transformers import AutoModelForMaskedLM, AutoTokenizer
from transformers import DataCollatorForLanguageModeling, DataCollatorForWholeWordMask
from transformers import Trainer, TrainingArguments
import sys
import gzip
from datetime import datetime
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False  # Set to True if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
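# --- Editor's sketch (not part of the dataset rows above) --------------------
# What the mlm_prob=0.15 collator in the script above does to one example:
# roughly 15% of token positions are selected for the MLM objective, and only
# those positions carry labels != -100 (the ignore index for the loss).
# Requires `transformers` and network access; the model name is just an example.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
batch = collator([tokenizer("The quick brown fox jumps over the lazy dog.")])
masked = (batch["labels"] != -100).sum().item()
print(f"{masked} of {batch['labels'].numel()} positions selected for MLM loss")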
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
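# --- Editor's sketch (not part of the dataset rows above) --------------------
# The doctest from the weighted_loss docstring above, as a runnable snippet
# (assuming the module above is importable). avg_factor replaces the element
# count in the mean's denominator, which is how detection losses normalize by
# e.g. the number of positive samples rather than by all elements.
import torch
@weighted_loss
def l1_loss(pred, target):
    return (pred - target).abs()
pred = torch.Tensor([0, 2, 3])
target = torch.Tensor([1, 1, 1])
weight = torch.Tensor([1, 0, 1])
print(l1_loss(pred, target))                        # tensor(1.3333)
print(l1_loss(pred, target, weight))                # tensor(1.)
print(l1_loss(pred, target, weight, avg_factor=2))  # tensor(1.5000)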
from __future__ import annotations
import pytest
from sentence_transformers.cross_encoder import CrossEncoder
@pytest.mark.parametrize(
"model_name, expected_score",
[
("cross-encoder/ms-marco-MiniLM-L6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]),
("cross-encoder/ms-marco-TinyBERT-L2-v2", [8.142767906188965, 1.2057735919952393, -2.7283530235290527]),
("cross-encoder/stsb-distilroberta-base", [0.4977430999279022, 0.255491703748703, 0.28261035680770874]),
("mixedbread-ai/mxbai-rerank-xsmall-v1", [0.9224735498428345, 0.04793589934706688, 0.03315146267414093]),
],
)
def test_pretrained_model(model_name: str, expected_score: list[float]) -> None:
# Ensure that pretrained models are not accidentally changed
model = CrossEncoder(model_name)
query = "is toprol xl the same as metoprolol?"
answers = [
"Metoprolol succinate is also known by the brand name Toprol XL. It is the extended-release form of metoprolol. Metoprolol succinate is approved to treat high blood pressure, chronic chest pain, and congestive heart failure.",
"Pill with imprint 1 is White, Round and has been identified as Metoprolol Tartrate 25 mg.",
"Interactions between your drugs No interactions were found between Allergy Relief and metoprolol. This does not necessarily mean no interactions exist. Always consult your healthcare provider.",
]
scores = model.predict([(query, answer) for answer in answers])
assert scores.tolist() == pytest.approx(expected_score, rel=1e-4)
|
from __future__ import annotations
import pytest
from sentence_transformers.cross_encoder import CrossEncoder
@pytest.mark.parametrize(
"model_name, expected_score",
[
("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]),
("cross-encoder/ms-marco-TinyBERT-L-2-v2", [8.142767906188965, 1.2057735919952393, -2.7283530235290527]),
("cross-encoder/stsb-distilroberta-base", [0.4977430999279022, 0.255491703748703, 0.28261035680770874]),
("mixedbread-ai/mxbai-rerank-xsmall-v1", [0.9224735498428345, 0.04793589934706688, 0.03315146267414093]),
],
)
def test_pretrained_model(model_name: str, expected_score: list[float]) -> None:
# Ensure that pretrained models are not accidentally changed
model = CrossEncoder(model_name)
query = "is toprol xl the same as metoprolol?"
answers = [
"Metoprolol succinate is also known by the brand name Toprol XL. It is the extended-release form of metoprolol. Metoprolol succinate is approved to treat high blood pressure, chronic chest pain, and congestive heart failure.",
"Pill with imprint 1 is White, Round and has been identified as Metoprolol Tartrate 25 mg.",
"Interactions between your drugs No interactions were found between Allergy Relief and metoprolol. This does not necessarily mean no interactions exist. Always consult your healthcare provider.",
]
scores = model.predict([(query, answer) for answer in answers])
assert scores.tolist() == pytest.approx(expected_score, rel=1e-4)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
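# Hedged usage note: scripts in the Llama 2 repo are typically launched with
# torchrun; the paths below are assumptions about a local checkpoint layout.
#   torchrun --nproc_per_node 1 example_chat_completion.py \
#       --ckpt_dir llama-2-7b-chat/ \
#       --tokenizer_path tokenizer.model \
#       --max_seq_len 512 --max_batch_size 8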
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 4,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage all kinds of evaluators for computing metrics
EVALUATORS = Registry('evaluator')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage writer
WRITERS = Registry('writer')
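# Hedged usage sketch (illustrative, not part of this module): a registry maps
# a string name to a class via ``register_module`` and instantiates it from a
# config dict via ``build``. ``ToyHook`` is an assumed example class.
@HOOKS.register_module()
class ToyHook:
    """A do-nothing hook used only to demonstrate the registry API."""
    def before_run(self, runner):
        print('ToyHook.before_run called')
toy_hook = HOOKS.build(dict(type='ToyHook'))  # returns a ToyHook instance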
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage all kinds of evaluators for computing metrics
EVALUATORS = Registry('evaluator')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage writer
WRITERS = Registry('writer')
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ConcurrentPushException(Exception):
"""Exception raised when a concurrent push is detected."""
pass
|
class ConcurrentPushException(Exception):
"""Exception raised when a concurrent push is detected."""
pass
|
from .conv_emformer import ConvEmformer
from .conv_tasnet import conv_tasnet_base
from .hdemucs import HDemucs
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conv_tasnet_base",
"ConvEmformer",
"HDemucs",
]
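if __name__ == "__main__":
    # Hedged usage sketch: instantiate the base Conformer RNN-T factory exposed
    # above, assuming (as in torchaudio's prototype API) that it takes no
    # required arguments.
    model = conformer_rnnt_base()
    print(type(model).__name__)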
|
from .conv_emformer import ConvEmformer
from .conv_tasnet import conv_tasnet_base
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conv_tasnet_base",
"ConvEmformer",
]
|
from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with (
gzip.open(filepath, "rt", encoding="utf8")
if filename.endswith(".gz")
else open(filepath, encoding="utf-8") as fIn
):
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
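if __name__ == "__main__":
    # Hedged usage sketch: the folder and file names below are assumptions
    # about a local copy of the STS benchmark distribution.
    reader = STSBenchmarkDataReader("datasets/stsbenchmark")
    for example in reader.get_examples("sts-test.csv", max_examples=3):
        print(example.texts, example.label)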
|
from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import _get_librispeech_metadata
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
_SUBSET_MAP = {"10min": ["1h/0"], "1h": ["1h/*"], "10h": ["1h/*", "9h"]}
def _get_fileids_paths(path, folders, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
Paths have the format:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
path = Path(path)
files_paths = []
for folder in folders:
paths = [p.relative_to(path) for p in path.glob(f"{folder}/*/*/*/*{_ext_audio}")]
files_paths += [(str(p.parent.parent.parent), str(p.stem)) for p in paths] # get subset folder and file name
files_paths.sort(key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Subset of Libri-light :cite:`librilight` dataset,
which was used in HuBERT :cite:`hsu2021hubert` for supervised fine-tuning.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``"10min"``, ``"1h"``, ``"10h"``]
(Default: ``"10min"``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in _SUBSET_MAP:
raise ValueError(f"`subset` must be one of {_SUBSET_MAP.keys()}. Found: {subset}")
folders = _SUBSET_MAP[subset]
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, folders, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
int:
Speaker ID
int:
Chapter ID
int:
Utterance ID
"""
file_path, fileid = self._fileids_paths[n]
metadata = _get_librispeech_metadata(fileid, self._path, file_path, self._ext_audio, self._ext_txt)
waveform, _ = torchaudio.load(os.path.join(self._path, metadata[0]))
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self._fileids_paths)
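if __name__ == "__main__":
    # Hedged usage sketch: "./data" is an assumed download location.
    dataset = LibriLightLimited("./data", subset="10min", download=True)
    waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id = dataset[0]
    print(waveform.shape, sample_rate, transcript)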
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import _get_librispeech_metadata
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
_SUBSET_MAP = {"10min": ["1h/0"], "1h": ["1h/*"], "10h": ["1h/*", "9h"]}
def _get_fileids_paths(path, folders, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
Paths have the format:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
path = Path(path)
files_paths = []
for folder in folders:
paths = [p.relative_to(path) for p in path.glob(f"{folder}/*/*/*/*{_ext_audio}")]
files_paths += [(str(p.parent.parent.parent), str(p.stem)) for p in paths] # get subset folder and file name
files_paths.sort(key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Create a Dataset for LibriLightLimited, which is the supervised subset of
LibriLight dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``10min``, ``1h``, ``10h``]
(Default: ``10min``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in _SUBSET_MAP:
raise ValueError(f"`subset` must be one of {_SUBSET_MAP.keys()}. Found: {subset}")
folders = _SUBSET_MAP[subset]
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, folders, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
file_path, fileid = self._fileids_paths[n]
metadata = _get_librispeech_metadata(fileid, self._path, file_path, self._ext_audio, self._ext_txt)
waveform, _ = torchaudio.load(os.path.join(self._path, metadata[0]))
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self._fileids_paths)
|
import numpy as np
from docarray import BaseDoc
from docarray.array.doc_vec.doc_vec import DocVec
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
assert (da._storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
assert da._storage.any_columns['name'] == ['hello' for _ in range(4)]
def test_da_iter():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=i * np.zeros((10, 10)), name=f'hello{i}') for i in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
for i, doc in enumerate(da):
assert isinstance(doc, MyDoc)
assert (doc.tensor == i * np.zeros((10, 10))).all()
assert doc.name == f'hello{i}'
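def test_da_column_access():
    # Hedged sketch (assumes DocVec's stacked attribute access): columns are
    # exposed as stacked tensors directly on the DocVec itself.
    class MyDoc(BaseDoc):
        tensor: AnyTensor
        name: str
    da = DocVec[MyDoc](
        [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)],
        tensor_type=NdArray,
    )
    assert da.tensor.shape == (4, 10)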
|
import numpy as np
from docarray import BaseDoc
from docarray.array.doc_vec.doc_vec import DocVec
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
assert (da._storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
assert da._storage.any_columns['name']._data == ['hello' for _ in range(4)]
def test_da_iter():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=i * np.zeros((10, 10)), name=f'hello{i}') for i in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
for i, doc in enumerate(da):
assert isinstance(doc, MyDoc)
assert (doc.tensor == i * np.zeros((10, 10))).all()
assert doc.name == f'hello{i}'
|