input | output |
---|---|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
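The pair above migrates the same FCOS config across MMDetection releases; in both versions the file is a plain Python dict tree consumed by MMEngine. A minimal sketch, assuming MMDetection 3.x with MMEngine installed, of how such a file is typically loaded and run (the config path is a hypothetical placeholder):
# Minimal sketch, assuming MMDetection 3.x / MMEngine; the config path below is a placeholder.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile('configs/fcos/fcos_r50-caffe_fpn_1x_coco.py')  # hypothetical path to the file above
cfg.work_dir = './work_dirs/fcos_r50_caffe_fpn_1x_coco'  # where logs and checkpoints are written
runner = Runner.from_cfg(cfg)  # builds the model, dataloaders, optimizer wrapper and hooks from the dict
# runner.train()  # uncomment to launch the 12-epoch (1x) schedule defined above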
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_data_sample import DetDataSample, OptSampleList, SampleList
from .reid_data_sample import ReIDDataSample
from .track_data_sample import (OptTrackSampleList, TrackDataSample,
TrackSampleList)
__all__ = [
'DetDataSample', 'SampleList', 'OptSampleList', 'TrackDataSample',
'TrackSampleList', 'OptTrackSampleList', 'ReIDDataSample'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_data_sample import DetDataSample, OptSampleList, SampleList
__all__ = ['DetDataSample', 'SampleList', 'OptSampleList']
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
        :param only_id: if set, the returned matches will only contain ``id``
:param filter: filter query used for pre-filtering
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from docarray.math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite._search_documents(
query, limit=limit, filter=filter or {}, include_metadata=not only_id
)
return match_docs
def _filter(
self,
filter: Dict,
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Annlite` filter).
:param filter: the input filter to apply in each stored document
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
docs = self._annlite.filter(
filter=filter,
limit=limit,
include_metadata=not only_id,
)
return DocumentArray(docs)
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
        :param only_id: if set, the returned matches will only contain ``id``
:param filter: filter query used for pre-filtering
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from docarray.math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite._search_documents(
query, limit=limit, filter=filter or {}, include_metadata=not only_id
)
return match_docs
def _filter(
self,
filter: Dict,
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Annlite` filter).
:param filter: the input filter to apply in each stored document
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
docs = self._annlite.filter(
filter=filter, limit=limit, include_metadata=not only_id
)
return DocumentArray(docs)
|
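Both cells above implement the storage-side hooks that the public `DocumentArray.find` API dispatches to. A hedged usage sketch, assuming DocArray 0.x with the `annlite` extra installed (sizes and dimensionality are arbitrary):
# Hedged usage sketch for the annlite-backed FindMixin above (DocArray 0.x, `annlite` extra assumed).
import numpy as np
from docarray import Document, DocumentArray

da = DocumentArray(storage='annlite', config={'n_dim': 128})
da.extend(Document(embedding=np.random.rand(128).astype('float32')) for _ in range(100))

# A vector query is routed to FindMixin._find shown above; passing a filter dict
# (annlite's MongoDB-like query language) would additionally trigger pre-filtering.
matches = da.find(np.random.rand(128).astype('float32'), limit=5)
print(matches)  # the closest stored documents for the query vector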
from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def config(self):
raise NotImplementedError()
@abstractmethod
def _upload_batch(self, docs: Iterable['Document']):
raise NotImplementedError()
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self.client.openapi_client.client.host
== other.openapi_client.client.host
and self.config == other.config
)
def __len__(self):
return self.client.http.collections_api.get_collection(
self.collection_name
).result.vectors_count
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._id_exists(x)
elif isinstance(x, Document):
return self._id_exists(x.id)
else:
return False
def _id_exists(self, x: str):
try:
self._get_doc_by_id(x)
return True
except KeyError:
return False
def __repr__(self):
return f'<DocumentArray[Qdrant] (length={len(self)}) at {id(self)}>'
def _extend(self, docs: Iterable['Document'], **kwargs):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
|
from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def config(self):
raise NotImplementedError()
@abstractmethod
def _upload_batch(self, docs: Iterable['Document']):
raise NotImplementedError()
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self.client.openapi_client.client.host
== other.openapi_client.client.host
and self.config == other.config
)
def __len__(self):
return self.client.http.collections_api.get_collection(
self.collection_name
).result.vectors_count
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._id_exists(x)
elif isinstance(x, Document):
return self._id_exists(x.id)
else:
return False
def _id_exists(self, x: str):
try:
self._get_doc_by_id(x)
return True
except KeyError:
return False
def __repr__(self):
return f'<DocumentArray[Qdrant] (length={len(self)}) at {id(self)}>'
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
|
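The Qdrant mixin above supplies the sequence protocol for Qdrant-backed DocumentArrays. A hedged usage sketch, assuming DocArray 0.x with the `qdrant` extra and a Qdrant server reachable on localhost:
# Hedged usage sketch (DocArray 0.x, `qdrant` extra, local Qdrant server assumed).
from docarray import Document, DocumentArray

da = DocumentArray(storage='qdrant', config={'n_dim': 128})
da.extend([Document(embedding=[0.0] * 128) for _ in range(3)])  # goes through _upload_batch above
print(len(da))         # __len__ above asks the collections API for the vector count
print(da[0].id in da)  # __contains__ above resolves membership by document id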
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(BaseDoc):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
assert d.point_cloud_url == "https://jina.ai/mesh.obj"
|
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(BaseDocument):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
assert d.point_cloud_url == "https://jina.ai/mesh.obj"
|
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
|
# ruff: noqa
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
|
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
MAX_ES_RETURNED_DOCS = 10000
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
# Handle if doc len is more than MAX_ES_RETURNED_DOCS
for pos in range(0, len(ids), self.MAX_ES_RETURNED_DOCS):
es_docs = self._client.mget(
index=self._config.index_name,
ids=ids[pos : pos + self.MAX_ES_RETURNED_DOCS],
)['docs']
for doc in es_docs:
if doc['found']:
accumulated_docs.append(
Document.from_base64(doc['_source']['blob'])
)
else:
accumulated_docs_id_not_found.append(doc['_id'])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
self._build_index()
def _load_offset2ids(self):
if self._list_like:
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids, list_like=self._list_like)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
self._update_offset2ids_meta()
|
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
MAX_ES_RETURNED_DOCS = 10000
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
# Handle if doc len is more than MAX_ES_RETURNED_DOCS
for pos in range(0, len(ids), self.MAX_ES_RETURNED_DOCS):
es_docs = self._client.mget(
index=self._config.index_name,
ids=ids[pos : pos + self.MAX_ES_RETURNED_DOCS],
)['docs']
for doc in es_docs:
if doc['found']:
accumulated_docs.append(
Document.from_base64(doc['_source']['blob'])
)
else:
accumulated_docs_id_not_found.append(doc['_id'])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
def _load_offset2ids(self):
if self._list_like:
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids, list_like=self._list_like)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
self._update_offset2ids_meta()
|
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..compat import import_cupy
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
cp = import_cupy()
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
def check_invalid_cat_batches(device: str) -> None:
"""Check error message for inconsistent feature types."""
class _InvalidCatIter(DataIter):
def __init__(self) -> None:
super().__init__(cache_prefix=None)
self._it = 0
def next(self, input_data: Callable) -> bool:
if self._it == 2:
return False
X, y = tm.make_categorical(
64,
12,
4,
onehot=False,
sparsity=0.5,
cat_ratio=1.0 if self._it == 0 else 0.5,
)
if device == "cuda":
import cudf
import cupy
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
it = _InvalidCatIter()
import pytest
with pytest.raises(ValueError, match="Inconsistent feature types between batches"):
ExtMemQuantileDMatrix(it, enable_categorical=True)
def check_uneven_sizes(device: str) -> None:
"""Tests for having irregular data shapes."""
batches = [
tm.make_regression(n_samples, 16, use_cupy=device == "cuda")
for n_samples in [512, 256, 1024]
]
unzip = list(zip(*batches))
it = tm.IteratorForTest(unzip[0], unzip[1], None, cache="cache", on_host=True)
Xy = DMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
Xy = ExtMemQuantileDMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
class CatIter(DataIter): # pylint: disable=too-many-instance-attributes
"""An iterator for testing categorical features."""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
n_samples_per_batch: int,
n_features: int,
*,
n_batches: int,
n_cats: int,
sparsity: float,
cat_ratio: float,
onehot: bool,
device: str,
cache: Optional[str],
) -> None:
super().__init__(cache_prefix=cache)
self.n_batches = n_batches
self.device = device
n_samples = n_samples_per_batch * n_batches
cat, y = tm.make_categorical(
n_samples,
n_features,
n_categories=n_cats,
onehot=onehot,
cat_ratio=cat_ratio,
sparsity=sparsity,
)
xs, ys = [], []
prev = 0
for _ in range(n_batches):
n = min(n_samples_per_batch, n_samples - prev)
X = cat.iloc[prev : prev + n, :]
xs.append(X)
ys.append(y[prev : prev + n])
prev += n_samples_per_batch
self.xs = xs
self.ys = ys
self.x = cat
self.y = y
self._it = 0
def xy(self) -> tuple:
"""Return the concatenated data."""
return self.x, self.y
def next(self, input_data: Callable) -> bool:
if self._it == self.n_batches:
return False
X, y = self.xs[self._it], self.ys[self._it]
if self.device == "cuda":
import cudf
import cupy
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
|
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
def check_invalid_cat_batches(device: str) -> None:
"""Check error message for inconsistent feature types."""
class _InvalidCatIter(DataIter):
def __init__(self) -> None:
super().__init__(cache_prefix=None)
self._it = 0
def next(self, input_data: Callable) -> bool:
if self._it == 2:
return False
X, y = tm.make_categorical(
64,
12,
4,
onehot=False,
sparsity=0.5,
cat_ratio=1.0 if self._it == 0 else 0.5,
)
if device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
it = _InvalidCatIter()
import pytest
with pytest.raises(ValueError, match="Inconsistent feature types between batches"):
ExtMemQuantileDMatrix(it, enable_categorical=True)
def check_uneven_sizes(device: str) -> None:
"""Tests for having irregular data shapes."""
batches = [
tm.make_regression(n_samples, 16, use_cupy=device == "cuda")
for n_samples in [512, 256, 1024]
]
unzip = list(zip(*batches))
it = tm.IteratorForTest(unzip[0], unzip[1], None, cache="cache", on_host=True)
Xy = DMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
Xy = ExtMemQuantileDMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
class CatIter(DataIter): # pylint: disable=too-many-instance-attributes
"""An iterator for testing categorical features."""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
n_samples_per_batch: int,
n_features: int,
*,
n_batches: int,
n_cats: int,
sparsity: float,
cat_ratio: float,
onehot: bool,
device: str,
cache: Optional[str],
) -> None:
super().__init__(cache_prefix=cache)
self.n_batches = n_batches
self.device = device
n_samples = n_samples_per_batch * n_batches
cat, y = tm.make_categorical(
n_samples,
n_features,
n_categories=n_cats,
onehot=onehot,
cat_ratio=cat_ratio,
sparsity=sparsity,
)
xs, ys = [], []
prev = 0
for _ in range(n_batches):
n = min(n_samples_per_batch, n_samples - prev)
X = cat.iloc[prev : prev + n, :]
xs.append(X)
ys.append(y[prev : prev + n])
prev += n_samples_per_batch
self.xs = xs
self.ys = ys
self.x = cat
self.y = y
self._it = 0
def xy(self) -> tuple:
"""Return the concatenated data."""
return self.x, self.y
def next(self, input_data: Callable) -> bool:
if self._it == self.n_batches:
return False
X, y = self.xs[self._it], self.ys[self._it]
if self.device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
|
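The tests above exercise XGBoost's batch-iterator protocol. A minimal, self-contained sketch of that protocol with plain NumPy batches (class and variable names here are illustrative, not from the test module):
# Minimal sketch of the DataIter protocol: next() feeds one batch via input_data(), reset() rewinds.
import numpy as np
import xgboost as xgb

class NumpyBatchIter(xgb.DataIter):
    def __init__(self, batches):
        self._batches = batches  # list of (X, y) pairs
        self._it = 0
        super().__init__(cache_prefix=None)

    def next(self, input_data):
        if self._it == len(self._batches):
            return False  # no more batches
        X, y = self._batches[self._it]
        input_data(data=X, label=y)  # hand the current batch to XGBoost
        self._it += 1
        return True

    def reset(self):
        self._it = 0

rng = np.random.default_rng(0)
batches = [(rng.random((64, 16)), rng.random(64)) for _ in range(3)]
Xy = xgb.QuantileDMatrix(NumpyBatchIter(batches))
print(Xy.num_row(), Xy.num_col())  # 192 16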
import requests
import urllib.parse
from typing import Dict, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
SEARCH_URL_TMPL = "https://api.search.brave.com/res/v1/web/search?{params}"
class BraveSearchToolSpec(BaseToolSpec):
"""
Brave Search tool spec.
"""
spec_functions = ["brave_search"]
def __init__(self, api_key: str) -> None:
"""
Initialize with parameters.
"""
self.api_key = api_key
def _make_request(self, params: Dict) -> requests.Response:
"""
Make a request to the Brave Search API.
Args:
params (dict): The parameters to be passed to the API.
Returns:
requests.Response: The response from the API.
"""
headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip",
"X-Subscription-Token": self.api_key,
}
url = SEARCH_URL_TMPL.format(params=urllib.parse.urlencode(params))
response = requests.get(url, headers=headers)
response.raise_for_status()
return response
def brave_search(
self, query: str, search_lang: str = "en", num_results: int = 5
    ) -> List[Document]:
"""
Make a query to the Brave Search engine to receive a list of results.
Args:
query (str): The query to be passed to Brave Search.
search_lang (str): The search language preference (ISO 639-1), default is "en".
num_results (int): The number of search results returned in response, default is 5.
Returns:
[Document]: A list of documents containing search results.
"""
search_params = {
"q": query,
"search_lang": search_lang,
"count": num_results,
}
response = self._make_request(search_params)
return [Document(text=response.text)]
|
import requests
import urllib.parse
from typing import Dict, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
SEARCH_URL_TMPL = "https://api.search.brave.com/res/v1/web/search?{params}"
class BraveSearchToolSpec(BaseToolSpec):
"""
Brave Search tool spec.
"""
spec_functions = ["brave_search"]
def __init__(self, api_key: str) -> None:
"""
Initialize with parameters.
"""
self.api_key = api_key
def _make_request(self, params: Dict) -> requests.Response:
"""
Make a request to the Brave Search API.
Args:
params (dict): The parameters to be passed to the API.
Returns:
requests.Response: The response from the API.
"""
headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip",
"X-Subscription-Token": self.api_key,
}
url = SEARCH_URL_TMPL.format(params=urllib.parse.urlencode(params))
response = requests.get(url, headers=headers)
response.raise_for_status()
return response
def brave_search(
self, query: str, search_lang: str = "en", num_results: int = 5
    ) -> List[Document]:
"""
Make a query to the Brave Search engine to receive a list of results.
Args:
query (str): The query to be passed to Brave Search.
search_lang (str): The search language preference (ISO 639-1), default is "en".
num_results (int): The number of search results returned in response, default is 5.
Returns:
[Document]: A list of documents containing search results.
"""
search_params = {
"q": query,
"search_lang": search_lang,
"count": num_results,
}
response = self._make_request(search_params)
return [Document(text=response.text)]
|
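The two cells above are identical; a hedged usage sketch of the tool spec (the import path assumes the `llama-index-tools-brave-search` package layout, and the key is a placeholder):
# Hedged usage sketch; import path assumes the llama-index-tools-brave-search package.
from llama_index.tools.brave_search import BraveSearchToolSpec

tool_spec = BraveSearchToolSpec(api_key="YOUR_BRAVE_API_KEY")  # placeholder key
docs = tool_spec.brave_search("retrieval augmented generation", num_results=3)
print(docs[0].text[:200])  # the raw JSON response wrapped in a single Document
# tools = tool_spec.to_tool_list()  # or hand the spec to an agent as a list of tools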
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
from sentence_transformers.training_args import BatchSamplers
# 1. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli, 10k samples
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train[:10000]")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# 2. Create an evaluator to perform useful HPO
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 3. Define the Hyperparameter Search Space
def hpo_search_space(trial):
return {
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 2),
"per_device_train_batch_size": trial.suggest_int("per_device_train_batch_size", 32, 128),
"warmup_ratio": trial.suggest_float("warmup_ratio", 0, 0.3),
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
}
# 4. Define the Model Initialization
def hpo_model_init(trial):
return SentenceTransformer("distilbert-base-uncased")
# 5. Define the Loss Initialization
def hpo_loss_init(model):
return losses.MultipleNegativesRankingLoss(model)
# 6. Define the Objective Function
def hpo_compute_objective(metrics):
"""
Valid keys are: 'eval_loss', 'eval_sts-dev_pearson_cosine', 'eval_sts-dev_spearman_cosine',
'eval_sts-dev_pearson_manhattan', 'eval_sts-dev_spearman_manhattan', 'eval_sts-dev_pearson_euclidean',
'eval_sts-dev_spearman_euclidean', 'eval_sts-dev_pearson_dot', 'eval_sts-dev_spearman_dot',
'eval_sts-dev_pearson_max', 'eval_sts-dev_spearman_max', 'eval_runtime', 'eval_samples_per_second',
'eval_steps_per_second', 'epoch'
due to the evaluator that we're using.
"""
return metrics["eval_sts-dev_spearman_cosine"]
# 7. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir="checkpoints",
# Optional training parameters:
# max_steps=10000, # We might want to limit the number of steps for HPO
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="no", # We don't need to evaluate/save during HPO
save_strategy="no",
logging_steps=10,
run_name="hpo", # Will be used in W&B if `wandb` is installed
)
# 8. Create the trainer with model_init rather than model
trainer = SentenceTransformerTrainer(
model=None,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
evaluator=dev_evaluator,
model_init=hpo_model_init,
loss=hpo_loss_init,
)
# 9. Perform the HPO
best_trial = trainer.hyperparameter_search(
hp_space=hpo_search_space,
compute_objective=hpo_compute_objective,
n_trials=20,
direction="maximize",
backend="optuna",
)
print(best_trial)
# Alternatively, to just train normally:
# trainer.train()
# print(dev_evaluator(trainer.model))
|
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
from sentence_transformers.training_args import BatchSamplers
# 1. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli, 10k samples
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train[:10000]")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# 2. Create an evaluator to perform useful HPO
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 3. Define the Hyperparameter Search Space
def hpo_search_space(trial):
return {
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 2),
"per_device_train_batch_size": trial.suggest_int("per_device_train_batch_size", 32, 128),
"warmup_ratio": trial.suggest_float("warmup_ratio", 0, 0.3),
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
}
# 4. Define the Model Initialization
def hpo_model_init(trial):
return SentenceTransformer("distilbert-base-uncased")
# 5. Define the Loss Initialization
def hpo_loss_init(model):
return losses.MultipleNegativesRankingLoss(model)
# 6. Define the Objective Function
def hpo_compute_objective(metrics):
"""
Valid keys are: 'eval_loss', 'eval_sts-dev_pearson_cosine', 'eval_sts-dev_spearman_cosine',
'eval_sts-dev_pearson_manhattan', 'eval_sts-dev_spearman_manhattan', 'eval_sts-dev_pearson_euclidean',
'eval_sts-dev_spearman_euclidean', 'eval_sts-dev_pearson_dot', 'eval_sts-dev_spearman_dot',
'eval_sts-dev_pearson_max', 'eval_sts-dev_spearman_max', 'eval_runtime', 'eval_samples_per_second',
'eval_steps_per_second', 'epoch'
due to the evaluator that we're using.
"""
return metrics["eval_sts-dev_spearman_cosine"]
# 7. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir="checkpoints",
# Optional training parameters:
# max_steps=10000, # We might want to limit the number of steps for HPO
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="no", # We don't need to evaluate/save during HPO
save_strategy="no",
logging_steps=10,
run_name="hpo", # Will be used in W&B if `wandb` is installed
)
# 8. Create the trainer with model_init rather than model
trainer = SentenceTransformerTrainer(
model=None,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
evaluator=dev_evaluator,
model_init=hpo_model_init,
loss=hpo_loss_init,
)
# 9. Perform the HPO
best_trial = trainer.hyperparameter_search(
hp_space=hpo_search_space,
compute_objective=hpo_compute_objective,
n_trials=20,
direction="maximize",
backend="optuna",
)
print(best_trial)
# Alternatively, to just train normally:
# trainer.train()
# print(dev_evaluator(trainer.model))
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import csv
import logging
import math
import os
from datetime import datetime
from zipfile import ZipFile
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
with ZipFile(zip_save_path, "r") as zip:
zip.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import gzip
import csv
from zipfile import ZipFile
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
#### /print debug information to stdout
#Check if dataset exists. If not, download and extract it
dataset_path = 'quora-dataset/'
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = 'quora-IR-dataset.zip'
util.http_get(url='https://sbert.net/datasets/quora-IR-dataset.zip', path=zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, 'classification', 'train_pairs.tsv'), 'r', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row['question1'], row['question2']], label=int(row['is_duplicate'])))
train_samples.append(InputExample(texts=[row['question2'], row['question1']], label=int(row['is_duplicate'])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, 'classification', 'dev_pairs.tsv'), 'r', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row['question1'], row['question2']], label=int(row['is_duplicate'])))
#Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = 'output/training_quora-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
#We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder('distilroberta-base', num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name='Quora-dev')
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict, Tuple
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...torch_encoder import ImageTorchEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize(
['content', 'out_shape'],
[
([np.ones((10, 10, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((360, 420, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((300, 300, 3), dtype=np.uint8), (3, 224, 224)]),
],
)
def test_preprocessing_reshape_correct(content: np.ndarray, out_shape: Tuple):
encoder = ImageTorchEncoder()
reshaped_content = encoder._preprocess(content)
assert (
reshaped_content.shape == out_shape
), f'Expected shape {out_shape} but got {reshaped_content.shape}'
@pytest.mark.parametrize(
'traversal_paths, docs',
[
(('r',), pytest.lazy_fixture('docs_with_blobs')),
(('c',), pytest.lazy_fixture('docs_with_chunk_blobs')),
],
)
def test_encode_image_returns_correct_length(
traversal_paths: Tuple[str], docs: DocumentArray
) -> None:
encoder = ImageTorchEncoder(default_traversal_path=traversal_paths)
encoder.encode(docs=docs, parameters={})
for doc in docs.traverse_flat(traversal_paths):
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.gpu
def test_encode_gpu(docs_with_blobs: DocumentArray) -> None:
encoder = ImageTorchEncoder(default_traversal_path=('r',), device='cuda')
encoder.encode(docs=docs_with_blobs, parameters={})
for doc in docs_with_blobs.traverse_flat(('r',)):
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['resnet50', 'mobilenet_v3_large', 'googlenet'])
def test_encodes_semantic_meaning(test_images: Dict[str, np.array], model_name: str):
encoder = ImageTorchEncoder(model_name=model_name)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
def test_no_preprocessing():
encoder = ImageTorchEncoder(use_default_preprocessing=False)
# without pre-processing the user needs to provide the right shape for the model directly
arr_in = np.ones((3, 224, 224), dtype=np.float32)
docs = DocumentArray([Document(blob=arr_in)])
encoder.encode(docs=docs, parameters={})
assert docs[0].embedding.shape == (512,)
def test_empty_doc_array():
docs = DocumentArray()
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert len(docs) == 0
def test_docs_array_with_no_text():
docs = DocumentArray([Document(text='hello world')])
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding is None
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Tuple, Dict
import pytest
import numpy as np
from jina import DocumentArray, Document, Executor
from ...torch_encoder import ImageTorchEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize(
['content', 'out_shape'],
[
([np.ones((10, 10, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((360, 420, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((300, 300, 3), dtype=np.uint8), (3, 224, 224)]),
],
)
def test_preprocessing_reshape_correct(content: np.ndarray, out_shape: Tuple):
encoder = ImageTorchEncoder()
reshaped_content = encoder._preprocess(content)
assert (
reshaped_content.shape == out_shape
), f'Expected shape {out_shape} but got {reshaped_content.shape}'
@pytest.mark.parametrize(
'traversal_paths, docs',
[
(('r',), pytest.lazy_fixture('docs_with_blobs')),
(('c',), pytest.lazy_fixture('docs_with_chunk_blobs')),
],
)
def test_encode_image_returns_correct_length(
traversal_paths: Tuple[str], docs: DocumentArray
) -> None:
encoder = ImageTorchEncoder(default_traversal_path=traversal_paths)
encoder.encode(docs=docs, parameters={})
for doc in docs.traverse_flat(traversal_paths):
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['resnet50', 'mobilenet_v3_large', 'googlenet'])
def test_encodes_semantic_meaning(test_images: Dict[str, np.array], model_name: str):
encoder = ImageTorchEncoder(model_name=model_name)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
def test_no_preprocessing():
encoder = ImageTorchEncoder(use_default_preprocessing=False)
# without pre-processing the user needs to provide the right shape for the model directly
arr_in = np.ones((3, 224, 224), dtype=np.float32)
docs = DocumentArray([Document(blob=arr_in)])
encoder.encode(docs=docs, parameters={})
assert docs[0].embedding.shape == (512,)
def test_empty_doc_array():
docs = DocumentArray()
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert len(docs) == 0
def test_docs_array_with_no_text():
docs = DocumentArray([Document(text='hello world')])
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding is None
|
import os
from pydoc import locate
import numpy as np
import pytest
from jina import Document, Flow
from PIL.Image import fromarray
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8')
im = fromarray(blob)
uri = os.path.join(tmpdir, 'tmp.png')
im.save(uri)
return uri
def data_generator(num_docs, numpy_image_uri):
for i in range(num_docs):
doc = Document(uri=numpy_image_uri)
doc.convert_image_uri_to_blob()
yield doc
@pytest.fixture()
def dtype(request):
os.environ['DTYPE'] = request.param
yield
del os.environ['DTYPE']
@pytest.mark.parametrize(
'dtype', ['numpy.uint8', 'numpy.float32', 'numpy.float64'], indirect=['dtype']
)
def test_use_in_flow(numpy_image_uri, dtype):
dtype = os.environ['DTYPE']
with Flow.load_config('flow.yml') as flow:
data = flow.post(
on='/index', inputs=data_generator(5, numpy_image_uri), return_results=True
)
for doc in data[0].docs:
assert doc.blob.shape == (64, 64, 3)
assert doc.blob.dtype == locate(dtype)
|
import os
from pydoc import locate
import numpy as np
import pytest
from PIL.Image import fromarray
from jina import Flow, Document
from ...normalizer import ImageNormalizer
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8')
im = fromarray(blob)
uri = os.path.join(tmpdir, 'tmp.png')
im.save(uri)
return uri
def data_generator(num_docs, numpy_image_uri):
for i in range(num_docs):
doc = Document(uri=numpy_image_uri)
doc.convert_image_uri_to_blob()
yield doc
@pytest.fixture()
def dtype(request):
os.environ['DTYPE'] = request.param
yield
del os.environ['DTYPE']
@pytest.mark.parametrize('dtype', ['numpy.uint8', 'numpy.float32', 'numpy.float64'], indirect=['dtype'])
def test_use_in_flow(numpy_image_uri, dtype):
dtype = os.environ['DTYPE']
with Flow.load_config('flow.yml') as flow:
data = flow.post(
on='/index', inputs=data_generator(5, numpy_image_uri), return_results=True
)
for doc in data[0].docs:
assert doc.blob.shape == (64, 64, 3)
assert doc.blob.dtype == locate(dtype)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
References:
- For further details, see: https://arxiv.org/abs/2004.05665
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor:
anchors = embeddings[0] # (batch_size, embedding_dim)
candidates = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim)
if embeddings_type == "query":
return torch.sum(torch.mean(anchors, dim=0) ** 2)
else:
return torch.sum(torch.mean(candidates, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
        This loss is used as a regularization component within other losses such as :class:`SpladeLoss` rather than
        as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
References:
- For further details, see: https://arxiv.org/abs/2004.05665
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor:
anchors = embeddings[0] # (batch_size, embedding_dim)
candidates = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim)
if embeddings_type == "query":
return torch.sum(torch.mean(anchors, dim=0) ** 2)
else:
return torch.sum(torch.mean(candidates, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CloudflareWorkersAI": "langchain_community.llms.cloudflare_workersai",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CloudflareWorkersAI",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CloudflareWorkersAI": "langchain_community.llms.cloudflare_workersai"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CloudflareWorkersAI",
]
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDocument):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from llama_index.llms.openai_like.base import OpenAILike
class OpenLLM(OpenAILike):
r"""
OpenLLM LLM.
    A thin wrapper around the OpenAI interface to help users interact with OpenLLM's running server.
Examples:
`pip install llama-index-llms-openllm`
```python
from llama_index.llms.openllm import OpenLLM
llm = OpenLLM(api_base="https://hostname.com/v1", api_key="na", model="meta-llama/Meta-Llama-3-8B-Instruct")
result = llm.complete("Hi, write a short story\n")
print(result)
```
"""
@classmethod
def class_name(cls) -> str:
return "OpenLLM"
|
from llama_index.llms.openai_like.base import OpenAILike
class OpenLLM(OpenAILike):
r"""
OpenLLM LLM.
    A thin wrapper around the OpenAI interface to help users interact with OpenLLM's running server.
Examples:
`pip install llama-index-llms-openllm`
```python
from llama_index.llms.openllm import OpenLLM
llm = OpenLLM(api_base="https://hostname.com/v1", api_key="na", model="meta-llama/Meta-Llama-3-8B-Instruct")
result = llm.complete("Hi, write a short story\n")
print(result)
```
"""
@classmethod
def class_name(cls) -> str:
return "OpenLLM"
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina import helper
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
        'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
        'This Deployment will not be context managed by the Flow.',
)
gp.add_argument(
'--grpc-metadata',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The metadata to be passed to the gRPC request.',
)
    # hidden CLI argument, used for internal purposes only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina import helper
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
        'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
        'This Deployment will not be context managed by the Flow.',
)
    # hidden CLI argument, used for internal purposes only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
|
"""Module to change the configuration of FFmpeg libraries (such as libavformat).
It affects functionalities in :py:mod:`torchaudio.io` (and indirectly :py:func:`torchaudio.load`).
"""
# This file is just for BC.
def __getattr__(item):
from torio.utils import ffmpeg_utils
return getattr(ffmpeg_utils, item)
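# A hedged usage sketch (not part of the original file): because of the module-level
# __getattr__ above (PEP 562), attribute lookups on this module are forwarded lazily to
# ``torio.utils.ffmpeg_utils``; the function name below is assumed to exist there.
#
#     from torchaudio.utils import ffmpeg_utils
#     print(ffmpeg_utils.get_log_level())  # resolved via torio.utils.ffmpeg_utils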
|
def __getattr__(item):
from torio.utils import ffmpeg_utils
return getattr(ffmpeg_utils, item)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple, Union
from mmcv.runner import BaseModule
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from torch import Tensor
from mmdet.core import DetDataSample
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: Optional[Union[ConfigDict, dict]] = None,
bbox_head: Optional[Union[ConfigDict, dict]] = None,
mask_roi_extractor: Optional[Union[ConfigDict, dict]] = None,
mask_head: Optional[Union[ConfigDict, dict]] = None,
shared_head: Optional[Union[ConfigDict, dict]] = None,
train_cfg: Optional[Union[ConfigDict, dict]] = None,
test_cfg: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
shared_head.pretrained = pretrained
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self, x: Tuple[Tensor],
proposal_list: List[InstanceData],
batch_data_samples: List[DetDataSample], **kwargs):
"""Forward function during training."""
# TODO: Currently not supported
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x: Tuple[Tensor],
proposal_list: List[InstanceData],
batch_img_metas: List[dict],
rescale: bool = False,
**kwargs):
"""Test without augmentation."""
# TODO: Currently not supported
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ..builder import build_shared_head
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
shared_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(BaseRoIHead, self).__init__(init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
shared_head.pretrained = pretrained
self.shared_head = build_shared_head(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self):
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self):
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self, x, proposal_list, data_samples, **kwargs):
"""Forward function during training."""
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x,
proposal_list,
img_meta,
proposals=None,
rescale=False,
**kwargs):
"""Test without augmentation."""
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
:param parser: the parser configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
help='The YAML path represents a flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
    The strategy for handling inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_flow_features_parser(parser)
return parser
|
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
:param parser: the parser configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
help='The YAML path represents a flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
    The strategy for handling inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_flow_features_parser(parser)
return parser
|
"""Vector stores."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = (
"VectorStore",
"VST",
"VectorStoreRetriever",
"InMemoryVectorStore",
)
_dynamic_imports = {
"VectorStore": "base",
"VST": "base",
"VectorStoreRetriever": "base",
"InMemoryVectorStore": "in_memory",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
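# A hedged usage sketch (not part of the original file): the module-level __getattr__ above
# resolves the names listed in __all__ lazily; langchain_core.vectorstores.in_memory is only
# imported the first time InMemoryVectorStore is looked up on this package, after which the
# result is cached in globals().
#
#     from langchain_core.vectorstores import InMemoryVectorStore  # triggers the lazy import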
|
"""Vector stores."""
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = [
"VectorStore",
"VST",
"VectorStoreRetriever",
"InMemoryVectorStore",
]
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; it seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
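# Hedged usage note (not part of the original file): the multiprocessing start method can be
# forced from the shell before importing jina, e.g.
#
#     JINA_MP_START_METHOD=spawn python -c "import jina"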
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.5'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256;
    this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; it seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.4'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256;
    this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
__version__ = '0.12.6'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.12.5'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
"""Mock embedding model."""
from typing import Any, List
from llama_index.core.base.embeddings.base import BaseEmbedding
class MockEmbedding(BaseEmbedding):
"""
Mock embedding.
Used for token prediction.
Args:
embed_dim (int): embedding dimension
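    Example:
        A minimal usage sketch (the embedding dimension here is arbitrary, and
        ``get_text_embedding`` is assumed to be the public helper inherited from
        ``BaseEmbedding``)::

            embed_model = MockEmbedding(embed_dim=4)
            embed_model.get_text_embedding("hello")  # -> [0.5, 0.5, 0.5, 0.5]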
"""
embed_dim: int
def __init__(self, embed_dim: int, **kwargs: Any) -> None:
"""Init params."""
super().__init__(embed_dim=embed_dim, **kwargs)
@classmethod
def class_name(cls) -> str:
return "MockEmbedding"
def _get_vector(self) -> List[float]:
return [0.5] * self.embed_dim
async def _aget_text_embedding(self, text: str) -> List[float]:
return self._get_vector()
async def _aget_query_embedding(self, query: str) -> List[float]:
return self._get_vector()
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_vector()
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_vector()
|
"""Mock embedding model."""
from typing import Any, List
from llama_index.core.base.embeddings.base import BaseEmbedding
class MockEmbedding(BaseEmbedding):
"""Mock embedding.
Used for token prediction.
Args:
embed_dim (int): embedding dimension
"""
embed_dim: int
def __init__(self, embed_dim: int, **kwargs: Any) -> None:
"""Init params."""
super().__init__(embed_dim=embed_dim, **kwargs)
@classmethod
def class_name(cls) -> str:
return "MockEmbedding"
def _get_vector(self) -> List[float]:
return [0.5] * self.embed_dim
async def _aget_text_embedding(self, text: str) -> List[float]:
return self._get_vector()
async def _aget_query_embedding(self, query: str) -> List[float]:
return self._get_vector()
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_vector()
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_vector()
|
from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal[ProviderName.UNREAL_SPEECH], Literal["api_key"]
] = CredentialsField(
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT, BlockCategory.MULTIMEDIA},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal[ProviderName.UNREAL_SPEECH], Literal["api_key"]
] = CredentialsField(
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dod_metric import DODCocoMetric
from .dump_det_results import DumpDetResults
from .dump_odvg_results import DumpODVGResults
from .dump_proposals_metric import DumpProposals
from .flickr30k_metric import Flickr30kMetric
from .grefcoco_metric import gRefCOCOMetric
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .ov_coco_metric import OVCocoMetric
from .refexp_metric import RefExpMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric', 'RefExpMetric',
'gRefCOCOMetric', 'DODCocoMetric', 'DumpODVGResults', 'Flickr30kMetric',
'OVCocoMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dod_metric import DODCocoMetric
from .dump_det_results import DumpDetResults
from .dump_odvg_results import DumpODVGResults
from .dump_proposals_metric import DumpProposals
from .flickr30k_metric import Flickr30kMetric
from .grefcoco_metric import gRefCOCOMetric
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .refexp_metric import RefExpMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric', 'RefExpMetric',
'gRefCOCOMetric', 'DODCocoMetric', 'DumpODVGResults', 'Flickr30kMetric'
]
|
import logging
import sys
import uuid
import pytest
from langchain.callbacks.tracers import LoggingCallbackHandler
def test_logging(
caplog: pytest.LogCaptureFixture,
capsys: pytest.CaptureFixture[str],
) -> None:
# Set up a Logger and a handler so we can check the Logger's handlers work too
logger = logging.getLogger("test_logging")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
handler = LoggingCallbackHandler(logger, extra={"test": "test_extra"})
handler.on_text("test", run_id=uuid.uuid4())
# Assert logging actually took place
assert len(caplog.record_tuples) == 1
record = caplog.records[0]
assert record.name == logger.name
assert record.levelno == logging.INFO
assert (
record.msg == "\x1b[36;1m\x1b[1;3m[text]\x1b[0m \x1b[1mNew text:\x1b[0m\ntest"
)
# Check the extra shows up
assert record.test == "test_extra" # type: ignore[attr-defined]
# Assert log handlers worked
cap_result = capsys.readouterr()
assert (
cap_result.out
== "\x1b[36;1m\x1b[1;3m[text]\x1b[0m \x1b[1mNew text:\x1b[0m\ntest\n"
)
|
import logging
import sys
import uuid
import pytest
from langchain.callbacks.tracers import LoggingCallbackHandler
def test_logging(
caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str]
) -> None:
# Set up a Logger and a handler so we can check the Logger's handlers work too
logger = logging.getLogger("test_logging")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
handler = LoggingCallbackHandler(logger, extra={"test": "test_extra"})
handler.on_text("test", run_id=uuid.uuid4())
# Assert logging actually took place
assert len(caplog.record_tuples) == 1
record = caplog.records[0]
assert record.name == logger.name
assert record.levelno == logging.INFO
assert (
record.msg == "\x1b[36;1m\x1b[1;3m[text]\x1b[0m \x1b[1mNew text:\x1b[0m\ntest"
)
# Check the extra shows up
assert record.test == "test_extra" # type: ignore[attr-defined]
# Assert log handlers worked
cap_result = capsys.readouterr()
assert (
cap_result.out
== "\x1b[36;1m\x1b[1;3m[text]\x1b[0m \x1b[1mNew text:\x1b[0m\ntest\n"
)
|
"""
Train XGBoost with cat_in_the_dat dataset
=========================================
A simple demo for categorical data support using dataset from Kaggle categorical data
tutorial.
The excellent tutorial is at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques
And the data can be found at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques/data
.. versionadded:: 1.6.0
See Also
--------
- :doc:`Tutorial </tutorials/categorical>`
- :ref:`sphx_glr_python_examples_categorical.py`
- :ref:`sphx_glr_python_examples_cat_pipeline.py`
"""
from __future__ import annotations
import os
from tempfile import TemporaryDirectory
from time import time
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import xgboost as xgb
def load_cat_in_the_dat() -> tuple[pd.DataFrame, pd.Series]:
"""Assuming you have already downloaded the data into `input` directory."""
df_train = pd.read_csv("./input/cat-in-the-dat/train.csv")
print(
"train data set has got {} rows and {} columns".format(
df_train.shape[0], df_train.shape[1]
)
)
X = df_train.drop(["target"], axis=1)
y = df_train["target"]
for i in range(0, 5):
X["bin_" + str(i)] = X["bin_" + str(i)].astype("category")
for i in range(0, 5):
X["nom_" + str(i)] = X["nom_" + str(i)].astype("category")
for i in range(5, 10):
X["nom_" + str(i)] = X["nom_" + str(i)].apply(int, base=16)
for i in range(0, 6):
X["ord_" + str(i)] = X["ord_" + str(i)].astype("category")
print(
"train data set has got {} rows and {} columns".format(X.shape[0], X.shape[1])
)
return X, y
params = {
"tree_method": "hist",
"device": "cuda",
"n_estimators": 32,
"colsample_bylevel": 0.7,
}
def categorical_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using builtin categorical data support from XGBoost"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1994, test_size=0.2
)
# Specify `enable_categorical` to True.
clf = xgb.XGBClassifier(
**params,
eval_metric="auc",
enable_categorical=True,
max_cat_to_onehot=1, # We use optimal partitioning exclusively
)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test), (X_train, y_train)])
clf.save_model(os.path.join(output_dir, "categorical.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using builtin categorical data support:", auc)
def onehot_encoding_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using one-hot encoded data."""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=0.2
)
# Specify `enable_categorical` to False as we are using encoded data.
clf = xgb.XGBClassifier(**params, eval_metric="auc", enable_categorical=False)
clf.fit(
X_train,
y_train,
eval_set=[(X_test, y_test), (X_train, y_train)],
)
clf.save_model(os.path.join(output_dir, "one-hot.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using onehot encoding:", auc)
if __name__ == "__main__":
X, y = load_cat_in_the_dat()
with TemporaryDirectory() as tmpdir:
start = time()
categorical_model(X, y, tmpdir)
end = time()
print("Duration:categorical", end - start)
X = pd.get_dummies(X)
start = time()
onehot_encoding_model(X, y, tmpdir)
end = time()
print("Duration:onehot", end - start)
|
"""
Train XGBoost with cat_in_the_dat dataset
=========================================
A simple demo for categorical data support using dataset from Kaggle categorical data
tutorial.
The excellent tutorial is at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques
And the data can be found at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques/data
Also, see the tutorial for using XGBoost with categorical data:
:doc:`/tutorials/categorical`.
.. versionadded:: 1.6.0
"""
from __future__ import annotations
import os
from tempfile import TemporaryDirectory
from time import time
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import xgboost as xgb
def load_cat_in_the_dat() -> tuple[pd.DataFrame, pd.Series]:
"""Assuming you have already downloaded the data into `input` directory."""
df_train = pd.read_csv("./input/cat-in-the-dat/train.csv")
print(
"train data set has got {} rows and {} columns".format(
df_train.shape[0], df_train.shape[1]
)
)
X = df_train.drop(["target"], axis=1)
y = df_train["target"]
for i in range(0, 5):
X["bin_" + str(i)] = X["bin_" + str(i)].astype("category")
for i in range(0, 5):
X["nom_" + str(i)] = X["nom_" + str(i)].astype("category")
for i in range(5, 10):
X["nom_" + str(i)] = X["nom_" + str(i)].apply(int, base=16)
for i in range(0, 6):
X["ord_" + str(i)] = X["ord_" + str(i)].astype("category")
print(
"train data set has got {} rows and {} columns".format(X.shape[0], X.shape[1])
)
return X, y
params = {
"tree_method": "hist",
"device": "cuda",
"n_estimators": 32,
"colsample_bylevel": 0.7,
}
def categorical_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using builtin categorical data support from XGBoost"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1994, test_size=0.2
)
# Specify `enable_categorical` to True.
clf = xgb.XGBClassifier(
**params,
eval_metric="auc",
enable_categorical=True,
max_cat_to_onehot=1, # We use optimal partitioning exclusively
)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test), (X_train, y_train)])
clf.save_model(os.path.join(output_dir, "categorical.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using builtin categorical data support:", auc)
def onehot_encoding_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using one-hot encoded data."""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=0.2
)
# Specify `enable_categorical` to False as we are using encoded data.
clf = xgb.XGBClassifier(**params, eval_metric="auc", enable_categorical=False)
clf.fit(
X_train,
y_train,
eval_set=[(X_test, y_test), (X_train, y_train)],
)
clf.save_model(os.path.join(output_dir, "one-hot.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using onehot encoding:", auc)
if __name__ == "__main__":
X, y = load_cat_in_the_dat()
with TemporaryDirectory() as tmpdir:
start = time()
categorical_model(X, y, tmpdir)
end = time()
print("Duration:categorical", end - start)
X = pd.get_dummies(X)
start = time()
onehot_encoding_model(X, y, tmpdir)
end = time()
print("Duration:onehot", end - start)
|
import pathlib
from typing import Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain.callbacks import FileCallbackHandler
from langchain.chains.base import Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
return {"bar": "bar"}
def test_filecallback(tmp_path: pathlib.Path) -> None:
"""Test the file callback handler."""
log1 = tmp_path / "output.log"
handler = FileCallbackHandler(str(log1))
chain_test = FakeChain(callbacks=[handler])
chain_test.invoke({"foo": "bar"})
handler.close()
# Assert the output is as expected
assert "Entering new FakeChain chain" in log1.read_text()
# Test using a callback manager
log2 = tmp_path / "output2.log"
with FileCallbackHandler(str(log2)) as handler_cm:
chain_test = FakeChain(callbacks=[handler_cm])
chain_test.invoke({"foo": "bar"})
assert "Entering new FakeChain chain" in log2.read_text()
# Test passing via invoke callbacks
log3 = tmp_path / "output3.log"
with FileCallbackHandler(str(log3)) as handler_cm:
chain_test.invoke({"foo": "bar"}, {"callbacks": [handler_cm]})
assert "Entering new FakeChain chain" in log3.read_text()
|
import pathlib
from typing import Any, Optional
import pytest
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain.callbacks import FileCallbackHandler
from langchain.chains.base import Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
return {"bar": "bar"}
def test_filecallback(capsys: pytest.CaptureFixture, tmp_path: pathlib.Path) -> Any:
"""Test the file callback handler."""
p = tmp_path / "output.log"
handler = FileCallbackHandler(str(p))
chain_test = FakeChain(callbacks=[handler])
chain_test.invoke({"foo": "bar"})
# Assert the output is as expected
assert p.read_text() == (
"\n\n\x1b[1m> Entering new FakeChain "
"chain...\x1b[0m\n\n\x1b[1m> Finished chain.\x1b[0m\n"
)
|
"""Init file."""
from llama_index.readers.dad_jokes.base import DadJokesReader
__all__ = ["DadJokesReader"]
|
"""Init file."""
from llama_index.readers.dad_jokes.base import DadJokesReader
__all__ = ["DadJokesReader"]
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.4.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information that contains the major, minor, and micro version numbers.
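    Example:
        Illustrative values only (derived from the parsing rules implemented below)::

            >>> parse_version_info('0.4.0')
            (0, 4, 0)
            >>> parse_version_info('1.0.0rc1')
            (1, 0, 0, 'rc1')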
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.3.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information that contains the major, minor, and micro version numbers.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
"""Dataset for DeepFashion."""
METAINFO = {
'CLASSES': ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants',
'bag', 'neckwear', 'headwear', 'eyeglass', 'belt',
'footwear', 'hair', 'skin', 'face'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
(0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
(128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
(128, 0, 96), (128, 0, 192), (0, 32, 192)]
}
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
'skin', 'face')
PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
(0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
(128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
(128, 0, 96), (128, 0, 192), (0, 32, 192)]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing import assert_allclose
@MODELS.register_module()
class CustomDataPreprocessor(BaseDataPreprocessor):
def forward(self, data, training=False):
if training:
return 1
else:
return 2
class ToyModel(BaseModel):
def __init__(self, data_preprocessor=None):
super().__init__(data_preprocessor=data_preprocessor, init_cfg=None)
self.conv = nn.Conv2d(3, 1, 1)
def forward(self, batch_inputs, data_samples=None, mode='tensor'):
if mode == 'loss':
out = self.conv(batch_inputs)
return dict(loss=out)
elif mode == 'predict':
out = self.conv(batch_inputs)
return out
elif mode == 'tensor':
out = self.conv(batch_inputs)
return out
class TestBaseModel(TestCase):
def test_init(self):
# initiate model without `preprocess_cfg`
model = ToyModel()
self.assertIsInstance(model.data_preprocessor, BaseDataPreprocessor)
data_preprocessor = dict(type='CustomDataPreprocessor')
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIsInstance(model.data_preprocessor, CustomDataPreprocessor)
self.assertEqual(model.data_preprocessor(1, training=True), 1)
self.assertEqual(model.data_preprocessor(1, training=False), 2)
# initiate model with built `data_preprocessor`.
data_preprocessor = CustomDataPreprocessor()
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIs(model.data_preprocessor, data_preprocessor)
# initiate model with error type `data_preprocessor`.
with self.assertRaisesRegex(TypeError, 'data_preprocessor should be'):
ToyModel(data_preprocessor=[data_preprocessor])
def test_parse_losses(self):
model = ToyModel()
loss_cls = torch.tensor(1, dtype=torch.float32)
loss_list = [
torch.tensor(2, dtype=torch.float32),
torch.tensor(3, dtype=torch.float32)
]
losses = dict(loss_cls=loss_cls, loss_list=loss_list)
target_parsed_losses = torch.tensor(6, dtype=torch.float32)
targe_log_vars = dict(
loss=torch.tensor(6, dtype=torch.float32),
loss_cls=torch.tensor(1, dtype=torch.float32),
loss_list=torch.tensor(5, dtype=torch.float32))
parse_losses, log_vars = model.parse_losses(losses)
assert_allclose(parse_losses, target_parsed_losses)
for key in log_vars:
self.assertIn(key, targe_log_vars)
assert_allclose(log_vars[key], targe_log_vars[key])
with self.assertRaises(TypeError):
losses['error_key'] = dict()
model.parse_losses(losses)
def test_train_step(self):
model = ToyModel()
optimizer = SGD(model.parameters(), lr=0.1)
optim_wrapper = OptimWrapper(optimizer)
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
# initiate grad.
# model.conv.weight.grad = torch.randn(1, 3, 1, 1)
log_vars = model.train_step([data], optim_wrapper)
self.assertIsNotNone(model.conv.weight.grad)
self.assertIsInstance(log_vars['loss'], torch.Tensor)
def test_val_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
out = model.val_step([data])
self.assertIsInstance(out, torch.Tensor)
def test_test_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
out = model.val_step([data])
self.assertIsInstance(out, torch.Tensor)
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_cuda(self):
inputs = torch.randn(3, 1, 1).cuda()
data = dict(inputs=inputs)
model = ToyModel().cuda()
out = model.val_step([data])
self.assertEqual(out.device.type, 'cuda')
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_to(self):
inputs = torch.randn(3, 1, 1).to('cuda:0')
data = dict(inputs=inputs)
model = ToyModel().to(torch.cuda.current_device())
out = model.val_step([data])
self.assertEqual(out.device.type, 'cuda')
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing import assert_allclose
@MODELS.register_module()
class CustomDataPreprocessor(BaseDataPreprocessor):
def forward(self, data, training=False):
if training:
return 1
else:
return 2
class ToyModel(BaseModel):
def __init__(self, data_preprocessor=None):
super().__init__(data_preprocessor=data_preprocessor, init_cfg=None)
self.conv = nn.Conv2d(3, 1, 1)
def forward(self, batch_inputs, data_samples=None, mode='tensor'):
if mode == 'loss':
out = self.conv(batch_inputs)
return dict(loss=out)
elif mode == 'predict':
out = self.conv(batch_inputs)
return out
elif mode == 'tensor':
out = self.conv(batch_inputs)
return out
class TestBaseModel(TestCase):
def test_init(self):
        # initiate model without `data_preprocessor`
model = ToyModel()
self.assertIsInstance(model.data_preprocessor, BaseDataPreprocessor)
data_preprocessor = dict(type='CustomDataPreprocessor')
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIsInstance(model.data_preprocessor, CustomDataPreprocessor)
self.assertEqual(model.data_preprocessor(1, training=True), 1)
self.assertEqual(model.data_preprocessor(1, training=False), 2)
# initiate model with built `data_preprocessor`.
data_preprocessor = CustomDataPreprocessor()
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIs(model.data_preprocessor, data_preprocessor)
        # initiate model with a `data_preprocessor` of the wrong type.
with self.assertRaisesRegex(TypeError, 'data_preprocessor should be'):
ToyModel(data_preprocessor=[data_preprocessor])
def test_parse_losses(self):
model = ToyModel()
loss_cls = torch.tensor(1, dtype=torch.float32)
loss_list = [
torch.tensor(2, dtype=torch.float32),
torch.tensor(3, dtype=torch.float32)
]
losses = dict(loss_cls=loss_cls, loss_list=loss_list)
target_parsed_losses = torch.tensor(6, dtype=torch.float32)
        target_log_vars = dict(
loss=torch.tensor(6, dtype=torch.float32),
loss_cls=torch.tensor(1, dtype=torch.float32),
loss_list=torch.tensor(5, dtype=torch.float32))
parse_losses, log_vars = model.parse_losses(losses)
assert_allclose(parse_losses, target_parsed_losses)
for key in log_vars:
            self.assertIn(key, target_log_vars)
            assert_allclose(log_vars[key], target_log_vars[key])
with self.assertRaises(TypeError):
losses['error_key'] = dict()
model.parse_losses(losses)
def test_train_step(self):
model = ToyModel()
optimizer = SGD(model.parameters(), lr=0.1)
optim_wrapper = OptimWrapper(optimizer)
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
# initiate grad.
# model.conv.weight.grad = torch.randn(1, 3, 1, 1)
log_vars = model.train_step([data], optim_wrapper)
self.assertIsNotNone(model.conv.weight.grad)
self.assertIsInstance(log_vars['loss'], torch.Tensor)
def test_val_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
out = model.val_step([data])
self.assertIsInstance(out, torch.Tensor)
def test_test_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
        out = model.test_step([data])
self.assertIsInstance(out, torch.Tensor)
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_cuda(self):
inputs = torch.randn(3, 1, 1).cuda()
data = dict(inputs=inputs)
model = ToyModel().cuda()
model.val_step([data])
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_to(self):
inputs = torch.randn(3, 1, 1).cuda()
data = dict(inputs=inputs)
model = ToyModel().to(torch.cuda.current_device())
model.val_step([data])
|
"""String utilities."""
from typing import Any
def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
if isinstance(val, dict):
return "\n" + stringify_dict(val)
if isinstance(val, list):
return "\n".join(stringify_value(v) for v in val)
return str(val)
def stringify_dict(data: dict) -> str:
"""Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ""
for key, value in data.items():
text += key + ": " + stringify_value(value) + "\n"
return text
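# Example: stringify_dict({"a": 1, "b": ["x", "y"]}) returns "a: 1\nb: x\ny\n".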
def comma_list(items: list[Any]) -> str:
"""Convert a list to a comma-separated string.
Args:
items: The list to convert.
Returns:
str: The comma-separated string.
"""
return ", ".join(str(item) for item in items)
|
"""String utilities."""
from typing import Any
def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
elif isinstance(val, dict):
return "\n" + stringify_dict(val)
elif isinstance(val, list):
return "\n".join(stringify_value(v) for v in val)
else:
return str(val)
def stringify_dict(data: dict) -> str:
"""Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ""
for key, value in data.items():
text += key + ": " + stringify_value(value) + "\n"
return text
def comma_list(items: list[Any]) -> str:
"""Convert a list to a comma-separated string.
Args:
items: The list to convert.
Returns:
str: The comma-separated string.
"""
return ", ".join(str(item) for item in items)
|
from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def config(self):
raise NotImplementedError()
@abstractmethod
def _upload_batch(self, docs: Iterable['Document']):
raise NotImplementedError()
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
        has the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DocumentArrays are considered the same if they have the same client metadata
return (
type(self) is type(other)
and self.client.openapi_client.client.host
            == other.client.openapi_client.client.host
and self.config == other.config
)
def __len__(self):
return self.client.http.collections_api.get_collection(
self.collection_name
).result.vectors_count
def __contains__(self, x: Union[str, 'Document']):
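        # membership accepts either a raw id string or a Document; both resolve to an id lookup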
if isinstance(x, str):
return self._id_exists(x)
elif isinstance(x, Document):
return self._id_exists(x.id)
else:
return False
def _id_exists(self, x: str):
try:
self._get_doc_by_id(x)
return True
except KeyError:
return False
def __repr__(self):
return f'<DocumentArray[Qdrant] (length={len(self)}) at {id(self)}>'
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
|
from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def config(self):
raise NotImplementedError()
@abstractmethod
def _upload_batch(self, docs: Iterable['Document']):
raise NotImplementedError()
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
        has the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DocumentArrays are considered the same if they have the same client metadata
return (
type(self) is type(other)
and self.client.openapi_client.client.host
            == other.client.openapi_client.client.host
and self.config == other.config
)
def __len__(self):
return self.client.http.collections_api.get_collection(
self.collection_name
).result.vectors_count
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._id_exists(x)
elif isinstance(x, Document):
return self._id_exists(x.id)
else:
return False
def _id_exists(self, x: str):
try:
self._get_doc_by_id(x)
return True
except KeyError:
return False
def __repr__(self):
return f'<DocumentArray[Qdrant] (length={len(self)}) at {id(self)}>'
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
# TODO : Watch the similarity default value; it is currently cosine, but dot product might be better for sparse
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
# TODO : Watch the similarity default value; it is currently cosine, but dot product might be better for sparse
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels) should equal len(stage_blocks)
HourglassNet(
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2, 4])
with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
HourglassNet(
downsample_times=5,
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(num_stacks=1)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 256, 64, 64])
# Test HourglassNet-104
model = HourglassNet(num_stacks=2)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 256, 64, 64])
assert feat[1].shape == torch.Size([1, 256, 64, 64])
|
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels) should equal len(stage_blocks)
HourglassNet(
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2, 4])
with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
HourglassNet(
downsample_times=5,
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(num_stacks=1)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 256, 64, 64])
# Test HourglassNet-104
model = HourglassNet(num_stacks=2)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 256, 64, 64])
assert feat[1].shape == torch.Size([1, 256, 64, 64])
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and score candidates
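# score() applies the caller-supplied margin function to the raw similarity x.dot(y),
# normalised by the mean of the forward and backward kNN similarities (LASER's margin criterion).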
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0] / 1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print("Done: {:.2f} sec".format(time.time() - start_time))
return sim, ind
def file_open(filepath):
    # Function to allow opening files based on file extension
if filepath.endswith(".gz"):
return gzip.open(filepath, "rt", encoding="utf8")
elif filepath.endswith("xz"):
return lzma.open(filepath, "rt", encoding="utf8")
else:
return open(filepath, "r", encoding="utf8")
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import faiss
import numpy as np
import time
import gzip
import lzma
######## Functions to find and score candidates
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0] / 1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print("Done: {:.2f} sec".format(time.time() - start_time))
return sim, ind
def file_open(filepath):
    # Function to allow opening files based on file extension
if filepath.endswith(".gz"):
return gzip.open(filepath, "rt", encoding="utf8")
elif filepath.endswith("xz"):
return lzma.open(filepath, "rt", encoding="utf8")
else:
return open(filepath, "r", encoding="utf8")
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
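        # enable DCN only in the last three ResNet stages (the first stage keeps plain convs)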
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='http://url.com')
assert doc == 'text'
assert doc != 'http://url.com'
doc2 = TextDoc(id=doc.id, text='text', url='http://url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', text='text', url='http://url.com')
assert doc == doc3
assert 't' in doc
assert 'a' not in doc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
text = TextDoc()
assert text is not None
assert text.text is None
|
from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='http://url.com')
assert doc == 'text'
assert doc != 'http://url.com'
doc2 = TextDoc(id=doc.id, text='text', url='http://url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', text='text', url='http://url.com')
assert doc != doc3
assert 't' in doc
assert 'a' not in doc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
text = TextDoc()
assert text is not None
assert text.text is None
|
"""Helpers for creating Anthropic API clients.
This module allows for the caching of httpx clients to avoid creating new instances
for each instance of ChatAnthropic.
Logic is largely replicated from anthropic._base_client.
"""
import asyncio
import os
from functools import lru_cache
from typing import Any, Optional
import anthropic
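# Sentinel default: lets the helpers below tell "timeout not provided" apart from an explicit None.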
_NOT_GIVEN: Any = object()
class _SyncHttpxClientWrapper(anthropic.DefaultHttpxClient):
"""Borrowed from anthropic._base_client"""
def __del__(self) -> None:
if self.is_closed:
return
try:
self.close()
except Exception: # noqa: S110
pass
class _AsyncHttpxClientWrapper(anthropic.DefaultAsyncHttpxClient):
"""Borrowed from anthropic._base_client"""
def __del__(self) -> None:
if self.is_closed:
return
try:
# TODO(someday): support non asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception: # noqa: S110
pass
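# lru_cache keys on (base_url, timeout), so ChatAnthropic instances created with the same
# settings share one cached httpx client instead of constructing a new one per instance.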
@lru_cache
def _get_default_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _SyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _SyncHttpxClientWrapper(**kwargs)
@lru_cache
def _get_default_async_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _AsyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _AsyncHttpxClientWrapper(**kwargs)
|
"""Helpers for creating Anthropic API clients.
This module allows for the caching of httpx clients to avoid creating new instances
for each instance of ChatAnthropic.
Logic is largely replicated from anthropic._base_client.
"""
import asyncio
import os
from functools import lru_cache
from typing import Any, Optional
import anthropic
_NOT_GIVEN: Any = object()
class _SyncHttpxClientWrapper(anthropic.DefaultHttpxClient):
"""Borrowed from anthropic._base_client"""
def __del__(self) -> None:
if self.is_closed:
return
try:
self.close()
except Exception:
pass
class _AsyncHttpxClientWrapper(anthropic.DefaultAsyncHttpxClient):
"""Borrowed from anthropic._base_client"""
def __del__(self) -> None:
if self.is_closed:
return
try:
# TODO(someday): support non asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception:
pass
@lru_cache
def _get_default_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _SyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _SyncHttpxClientWrapper(**kwargs)
@lru_cache
def _get_default_async_httpx_client(
*,
base_url: Optional[str],
timeout: Any = _NOT_GIVEN,
) -> _AsyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
return _AsyncHttpxClientWrapper(**kwargs)
|
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
from sentence_transformers import losses, SentenceTransformer, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
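    # drop ground-truth boxes whose width or height falls below 1e-2 px after cropping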
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# The model is trained for 270k iterations with batch_size 64,
# which is roughly equivalent to 144 epochs.
max_iters = 270000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[243000, 256500, 263250],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# The model is trained for 270k iterations with batch_size 64,
# which is roughly equivalent to 144 epochs.
max_iters = 270000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[243000, 256500, 263250],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_executions_in_timerange,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_user_notification_batch,
get_user_notification_last_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_active_users_ids,
get_user_by_id,
get_user_email_by_id,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
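        # Wrap an async data-layer function so it is exposed over the AppService RPC
        # and executed synchronously on the service's event loop via run_and_wait.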
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
get_executions_in_timerange = exposed_run_and_wait(get_executions_in_timerange)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
spend_credits = cast(
Callable[[Any, NodeExecutionEntry, float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations + User Notification Preferences
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_by_id = exposed_run_and_wait(get_user_by_id)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
get_active_users_ids = exposed_run_and_wait(get_active_users_ids)
# Notifications
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
get_user_notification_last_message_in_batch = exposed_run_and_wait(
get_user_notification_last_message_in_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_executions_in_timerange,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_user_notification_batch,
get_user_notification_last_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_active_users_ids,
get_user_by_id,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
get_executions_in_timerange = exposed_run_and_wait(get_executions_in_timerange)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
spend_credits = cast(
Callable[[Any, NodeExecutionEntry, float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations + User Notification Preferences
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_by_id = exposed_run_and_wait(get_user_by_id)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
get_active_users_ids = exposed_run_and_wait(get_active_users_ids)
# Notifications
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
get_user_notification_last_message_in_batch = exposed_run_and_wait(
get_user_notification_last_message_in_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.base_document import DocumentResponse
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: ImageDoc
text: TextDoc
title: str
input_doc = Mmdoc(
img=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(), title='hello'
)
app = FastAPI()
@app.post("/doc/", response_model=Mmdoc, response_class=DocumentResponse)
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDocument):
img: ImageDoc
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocumentResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
doc = OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDocument):
text: str
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocumentResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.base_document import DocumentResponse
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/", response_model=Mmdoc, response_class=DocumentResponse)
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocumentResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
doc = OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDocument):
text: str
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocumentResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
class RequestTopUp(pydantic.BaseModel):
credit_amount: int
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
class RequestTopUp(pydantic.BaseModel):
amount: int
"""Amount of credits to top up."""
|
"""Module to change the configuration of FFmpeg libraries (such as libavformat).
It affects functionalities in :py:mod:`torchaudio.io` (and indirectly :py:func:`torchaudio.load`).
"""
from typing import Dict, Tuple
import torch
def get_versions() -> Dict[str, Tuple[int]]:
"""Get the versions of FFmpeg libraries
Returns:
        dict: mapping from library names to version tuples,
        e.g. `"libavutil": (56, 22, 100)`.
"""
return torch.ops.torchaudio.ffmpeg_get_versions()
def get_log_level() -> int:
"""Get the log level of FFmpeg.
    See :py:func:`set_log_level` for the details.
"""
return torch.ops.torchaudio.ffmpeg_get_log_level()
def set_log_level(level: int):
"""Set the log level of FFmpeg (libavformat etc)
Arguments:
level (int): Log level. The larger, the more verbose.
            The following are common values, along with the corresponding ``ffmpeg``
            ``-loglevel`` option value and description.
* ``-8`` (``quiet``):
Print no output.
* ``0`` (``panic``):
Something went really wrong and we will crash now.
* ``8`` (``fatal``):
Something went wrong and recovery is not possible.
For example, no header was found for a format which depends
on headers or an illegal combination of parameters is used.
* ``16`` (``error``):
Something went wrong and cannot losslessly be recovered.
However, not all future data is affected.
* ``24`` (``warning``):
Something somehow does not look correct.
This may or may not lead to problems.
* ``32`` (``info``):
Standard information.
* ``40`` (``verbose``):
Detailed information.
* ``48`` (``debug``):
Stuff which is only useful for libav* developers.
* ``56`` (``trace``):
Extremely verbose debugging, useful for libav* development.
"""
torch.ops.torchaudio.ffmpeg_set_log_level(level)
|
from typing import Dict, Tuple
import torch
def get_versions() -> Dict[str, Tuple[int]]:
"""Get the versions of FFmpeg libraries
Returns:
        dict: mapping from library names to version tuples,
        e.g. `"libavutil": (56, 22, 100)`.
"""
return torch.ops.torchaudio.ffmpeg_get_versions()
def get_log_level() -> int:
"""Get the log level of FFmpeg.
    See :py:func:`set_log_level` for the details.
"""
return torch.ops.torchaudio.ffmpeg_get_log_level()
def set_log_level(level: int):
"""Set the log level of FFmpeg (libavformat etc)
Arguments:
level (int): Log level. The larger, the more verbose.
            The following are common values, along with the corresponding ``ffmpeg``
            ``-loglevel`` option value and description.
* ``-8`` (``quiet``):
Print no output.
* ``0`` (``panic``):
Something went really wrong and we will crash now.
* ``8`` (``fatal``):
Something went wrong and recovery is not possible.
For example, no header was found for a format which depends
on headers or an illegal combination of parameters is used.
* ``16`` (``error``):
Something went wrong and cannot losslessly be recovered.
However, not all future data is affected.
* ``24`` (``warning``):
Something somehow does not look correct.
This may or may not lead to problems.
* ``32`` (``info``):
Standard information.
* ``40`` (``verbose``):
Detailed information.
* ``48`` (``debug``):
Stuff which is only useful for libav* developers.
* ``56`` (``trace``):
Extremely verbose debugging, useful for libav* development.
"""
torch.ops.torchaudio.ffmpeg_set_log_level(level)
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
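# Hedged illustration (not part of the package source): the aliasing above is a
# backwards-compatibility shim, so legacy import paths such as
#
#     from datasets.utils import DownloadConfig, DownloadManager
#     from datasets.utils.download_manager import DownloadMode
#
# should keep resolving even though these classes now live under
# `datasets.download` (re-exported here via `from .download import *`).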
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
_delete_=True,
type='DeformRoIPoolPack',
output_size=7,
output_channels=256),
out_channels=256,
featmap_strides=[4, 8, 16, 32])))
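# Hedged note (not part of the original config): `_delete_=True` is the
# MMCV/MMDetection config convention that tells the merger to drop the base
# config's `roi_layer` dict entirely instead of merging it key-by-key, so only
# the keys written above survive. A rough sketch of the intended behaviour:
#
#     base   = dict(type='RoIAlign', output_size=7, sampling_ratio=0)
#     update = dict(_delete_=True, type='DeformRoIPoolPack',
#                   output_size=7, output_channels=256)
#     # merged result == update minus the `_delete_` marker; `sampling_ratio`
#     # from `base` is not inherited.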
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
_delete_=True,
type='DeformRoIPoolPack',
output_size=7,
output_channels=256),
out_channels=256,
featmap_strides=[4, 8, 16, 32])))
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from .audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
tokenizer_path: str = '.cache/bpe_simple_vocab_16e6.txt.gz',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path to the pre-trained AudioCLIP model.
:param traversal_paths: default traversal path (used if not specified in
request's parameters)
:param batch_size: default batch size (used if not specified in
request's parameters)
:param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
where X is the index of the GPU on the machine)
"""
super().__init__(*args, **kwargs)
self.model = (
AudioCLIP(
pretrained=model_path,
bpe_path=tokenizer_path,
)
.to(device)
.eval()
)
self.traversal_paths = traversal_paths
self.batch_size = batch_size
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> None:
"""
Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
if not docs:
return
batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
needs_attr='text',
)
with torch.no_grad():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
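# Hedged usage sketch (illustrative, not part of the executor): assuming the
# AudioCLIP checkpoint and BPE vocabulary referenced by the default
# `model_path`/`tokenizer_path` exist under `.cache/`, the encoder could be
# exercised inside a Jina Flow roughly like this:
#
#     from jina import Document, DocumentArray, Flow
#
#     f = Flow().add(uses=AudioCLIPTextEncoder)
#     with f:
#         f.post(
#             on='/encode',
#             inputs=DocumentArray([Document(text='a dog barking')]),
#         )
#     # after the call, each text document carries an AudioCLIP `embedding`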
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from .audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
tokenizer_path: str = '.cache/bpe_simple_vocab_16e6.txt.gz',
default_traversal_paths: Iterable[str] = ('r',),
default_batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path to the pre-trained AudioCLIP model.
:param default_traversal_paths: default traversal path (used if not specified in
request's parameters)
:param default_batch_size: default batch size (used if not specified in
request's parameters)
:param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
where X is the index of the GPU on the machine)
"""
super().__init__(*args, **kwargs)
self.model = (
AudioCLIP(
pretrained=model_path,
bpe_path=tokenizer_path,
)
.to(device)
.eval()
)
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> None:
"""
        Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
with torch.no_grad():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
|
from docarray import DocumentArray
from jina import requests
from jina.serve.executors import BaseExecutor
class DummyExternalIndexer(BaseExecutor):
@requests
def index(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'indexed'
|
from jina.serve.executors import BaseExecutor
class DummyExternalIndexer(BaseExecutor):
pass
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L: # type: ignore[override]
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def wrap_like(cls: Type[L], other: L, tensor: torch.Tensor, *, categories: Optional[Sequence[str]] = None) -> L:
return cls._wrap(
tensor,
categories=categories if categories is not None else other.categories,
)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist())
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
if categories is not None and len(categories) != one_hot_label.shape[-1]:
raise ValueError()
return one_hot_label
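# Hedged usage sketch (illustrative, mirrors the methods defined above):
#
#     categories = ["cat", "dog", "bird"]
#     label = Label.from_category("dog", categories=categories)
#     assert int(label) == 1                    # index of "dog" in `categories`
#     assert label.to_categories() == "dog"     # maps the index back to a name
#
#     one_hot = OneHotLabel([0, 1, 0], categories=categories)
#     # the trailing dimension (3) must match len(categories), else ValueError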
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def wrap_like(cls: Type[L], other: L, tensor: torch.Tensor, *, categories: Optional[Sequence[str]] = None) -> L:
return cls._wrap(
tensor,
categories=categories if categories is not None else other.categories,
)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist())
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
if categories is not None and len(categories) != one_hot_label.shape[-1]:
raise ValueError()
return one_hot_label
|
"""Athena Reader."""
import warnings
from typing import Optional
import boto3
from llama_index.core.readers.base import BaseReader
from sqlalchemy.engine import create_engine
class AthenaReader(BaseReader):
"""
Athena reader.
Follow AWS best practices for security.
AWS discourages hardcoding credentials in code.
We recommend that you use IAM roles instead of IAM user credentials.
If you must use credentials, do not embed them in your code.
Instead, store them in environment variables or in a separate configuration file.
"""
def __init__(
self,
) -> None:
"""Initialize with parameters."""
def create_athena_engine(
self,
aws_access_key: Optional[str] = None,
aws_secret_key: Optional[str] = None,
aws_region: str = None,
s3_staging_dir: str = None,
database: str = None,
workgroup: str = None,
):
"""
Args:
            aws_access_key: AWS access key from the AWS credentials.
            aws_secret_key: AWS secret key from the AWS credentials.
            aws_region: AWS region.
            s3_staging_dir: S3 staging (result bucket) directory.
            database: Athena database name.
            workgroup: Athena workgroup name.
"""
if not aws_access_key or not aws_secret_key:
conn_str = (
"awsathena+rest://:@athena.{region_name}.amazonaws.com:443/"
"{database}?s3_staging_dir={s3_staging_dir}?work_group={workgroup}"
)
engine = create_engine(
conn_str.format(
region_name=aws_region,
s3_staging_dir=s3_staging_dir,
database=database,
workgroup=workgroup,
)
)
else:
warnings.warn(
"aws_access_key and aws_secret_key are set. We recommend to use IAM role instead."
)
boto3.client(
"athena",
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
region_name=aws_region,
)
conn_str = (
"awsathena+rest://:@athena.{region_name}.amazonaws.com:443/"
"{database}?s3_staging_dir={s3_staging_dir}?work_group={workgroup}"
)
engine = create_engine(
conn_str.format(
region_name=aws_region,
s3_staging_dir=s3_staging_dir,
database=database,
workgroup=workgroup,
)
)
return engine
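# Hedged usage sketch (illustrative; the region, bucket, database and workgroup
# below are placeholders, not values from this module). With an IAM role
# attached to the environment, no explicit credentials are needed, and the
# returned SQLAlchemy engine can be queried directly (``text`` here is
# ``sqlalchemy.text``):
#
#     reader = AthenaReader()
#     engine = reader.create_athena_engine(
#         aws_region="us-east-1",
#         s3_staging_dir="s3://my-athena-results/",
#         database="my_database",
#         workgroup="primary",
#     )
#     with engine.connect() as conn:
#         rows = conn.execute(text("SELECT 1")).fetchall()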
|
"""Athena Reader."""
import warnings
from typing import Optional
import boto3
from llama_index.core.readers.base import BaseReader
from sqlalchemy.engine import create_engine
class AthenaReader(BaseReader):
"""Athena reader.
Follow AWS best practices for security.
AWS discourages hardcoding credentials in code.
We recommend that you use IAM roles instead of IAM user credentials.
If you must use credentials, do not embed them in your code.
Instead, store them in environment variables or in a separate configuration file.
"""
def __init__(
self,
) -> None:
"""Initialize with parameters."""
def create_athena_engine(
self,
aws_access_key: Optional[str] = None,
aws_secret_key: Optional[str] = None,
aws_region: str = None,
s3_staging_dir: str = None,
database: str = None,
workgroup: str = None,
):
"""
Args:
            aws_access_key: AWS access key from the AWS credentials.
            aws_secret_key: AWS secret key from the AWS credentials.
            aws_region: AWS region.
            s3_staging_dir: S3 staging (result bucket) directory.
            database: Athena database name.
            workgroup: Athena workgroup name.
"""
if not aws_access_key or not aws_secret_key:
conn_str = (
"awsathena+rest://:@athena.{region_name}.amazonaws.com:443/"
"{database}?s3_staging_dir={s3_staging_dir}?work_group={workgroup}"
)
engine = create_engine(
conn_str.format(
region_name=aws_region,
s3_staging_dir=s3_staging_dir,
database=database,
workgroup=workgroup,
)
)
else:
warnings.warn(
"aws_access_key and aws_secret_key are set. We recommend to use IAM role instead."
)
boto3.client(
"athena",
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
region_name=aws_region,
)
conn_str = (
"awsathena+rest://:@athena.{region_name}.amazonaws.com:443/"
"{database}?s3_staging_dir={s3_staging_dir}?work_group={workgroup}"
)
engine = create_engine(
conn_str.format(
region_name=aws_region,
s3_staging_dir=s3_staging_dir,
database=database,
workgroup=workgroup,
)
)
return engine
|
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional, Tuple
import torch
from torch import nn
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class TestAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://arxiv.org/pdf/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
def forward(self) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
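# Hedged shape sketch (illustrative, follows the docstring of
# `apply_rotary_pos_emb` above): with cos/sin of shape
# [batch_size, seq_len, head_dim] and q/k of shape
# [batch_size, heads, seq_len, head_dim], `unsqueeze_dim=1` inserts the missing
# heads axis so the elementwise products broadcast.
#
#     q = torch.randn(2, 4, 8, 16)
#     k = torch.randn(2, 4, 8, 16)
#     cos = torch.randn(2, 8, 16)
#     sin = torch.randn(2, 8, 16)
#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1)
#     assert q_rot.shape == q.shape and k_rot.shape == k.shape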
|
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional
import torch
from torch import nn
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class TestAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://arxiv.org/pdf/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
def forward(self) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
|
import pytest
from docarray import BaseDoc
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowEmbedding, TensorFlowTensor
@pytest.mark.tensorflow
def test_set_tensorflow_tensor():
class MyDocument(BaseDoc):
t: TensorFlowTensor
doc = MyDocument(t=tf.zeros((3, 224, 224)))
assert isinstance(doc.t, TensorFlowTensor)
assert isinstance(doc.t.tensor, tf.Tensor)
assert tnp.allclose(doc.t.tensor, tf.zeros((3, 224, 224)))
@pytest.mark.tensorflow
def test_set_tf_embedding():
class MyDocument(BaseDoc):
embedding: TensorFlowEmbedding
doc = MyDocument(embedding=tf.zeros((128,)))
assert isinstance(doc.embedding, TensorFlowTensor)
assert isinstance(doc.embedding, TensorFlowEmbedding)
assert isinstance(doc.embedding.tensor, tf.Tensor)
assert tnp.allclose(doc.embedding.tensor, tf.zeros((128,)))
|
import pytest
from docarray import BaseDocument
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowEmbedding, TensorFlowTensor
@pytest.mark.tensorflow
def test_set_tensorflow_tensor():
class MyDocument(BaseDocument):
t: TensorFlowTensor
doc = MyDocument(t=tf.zeros((3, 224, 224)))
assert isinstance(doc.t, TensorFlowTensor)
assert isinstance(doc.t.tensor, tf.Tensor)
assert tnp.allclose(doc.t.tensor, tf.zeros((3, 224, 224)))
@pytest.mark.tensorflow
def test_set_tf_embedding():
class MyDocument(BaseDocument):
embedding: TensorFlowEmbedding
doc = MyDocument(embedding=tf.zeros((128,)))
assert isinstance(doc.embedding, TensorFlowTensor)
assert isinstance(doc.embedding, TensorFlowEmbedding)
assert isinstance(doc.embedding.tensor, tf.Tensor)
assert tnp.allclose(doc.embedding.tensor, tf.zeros((128,)))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .efficientnet import EfficientNet
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer',
'PyramidVisionTransformerV2', 'EfficientNet'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer', 'PyramidVisionTransformerV2'
]
|
import os as _os
import sys as _sys
from pathlib import Path as _Path
import datetime as _datetime
__windows__ = _sys.platform == 'win32'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py
# "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g" 3. copy all lines EXCEPT the first (which is the grep command in the
# last line)
__jina_env__ = (
'JINA_DEFAULT_HOST',
'JINA_DEFAULT_TIMEOUT_CTRL',
'JINA_DEPLOYMENT_NAME',
'JINA_DISABLE_UVLOOP',
'JINA_EARLY_STOP',
'JINA_FULL_CLI',
'JINA_GATEWAY_IMAGE',
'JINA_GRPC_RECV_BYTES',
'JINA_GRPC_SEND_BYTES',
'JINA_HUB_NO_IMAGE_REBUILD',
'JINA_LOG_CONFIG',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_MP_START_METHOD',
'JINA_OPTOUT_TELEMETRY',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_LOCKS_ROOT',
'JINA_OPTOUT_TELEMETRY',
'JINA_K8S_ACCESS_MODES',
'JINA_K8S_STORAGE_CLASS_NAME',
'JINA_K8S_STORAGE_CAPACITY',
'JINA_STREAMER_ARGS',
)
__default_host__ = _os.getenv(
'JINA_DEFAULT_HOST', '127.0.0.1' if __windows__ else '0.0.0.0'
)
__docker_host__ = 'host.docker.internal'
__default_executor__ = 'BaseExecutor'
__default_gateway__ = 'BaseGateway'
__default_http_gateway__ = 'HTTPGateway'
__default_composite_gateway__ = 'CompositeGateway'
__default_websocket_gateway__ = 'WebSocketGateway'
__default_grpc_gateway__ = 'GRPCGateway'
__default_endpoint__ = '/default'
__dynamic_base_gateway_hubble__ = 'jinaai+docker://jina-ai/JinaGateway:latest'
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unset_msg__ = '(unset)'
__args_executor_func__ = {
'docs',
'parameters',
'docs_matrix',
}
__args_executor_init__ = {'metas', 'requests', 'runtime_args'}
__resources_path__ = _os.path.join(
_os.path.dirname(_sys.modules['jina'].__file__), 'resources'
)
__cache_path__ = f'{_os.path.expanduser("~")}/.cache/{__package__}'
if not _Path(__cache_path__).exists():
_Path(__cache_path__).mkdir(parents=True, exist_ok=True)
_names_with_underscore = [
'__version__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__jina_env__',
'__uptime__',
'__default_endpoint__',
'__default_executor__',
'__unset_msg__',
'__windows__',
]
__all__ = [_s for _s in dir() if not _s.startswith('_')] + _names_with_underscore
RAFT_TO_EXECUTOR_PORT = 100
|
import os as _os
import sys as _sys
from pathlib import Path as _Path
import datetime as _datetime
__windows__ = _sys.platform == 'win32'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py
# "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g" 3. copy all lines EXCEPT the first (which is the grep command in the
# last line)
__jina_env__ = (
'JINA_DEFAULT_HOST',
'JINA_DEFAULT_TIMEOUT_CTRL',
'JINA_DEPLOYMENT_NAME',
'JINA_DISABLE_UVLOOP',
'JINA_EARLY_STOP',
'JINA_FULL_CLI',
'JINA_GATEWAY_IMAGE',
'JINA_GRPC_RECV_BYTES',
'JINA_GRPC_SEND_BYTES',
'JINA_HUB_NO_IMAGE_REBUILD',
'JINA_LOG_CONFIG',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_MP_START_METHOD',
'JINA_OPTOUT_TELEMETRY',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_LOCKS_ROOT',
'JINA_OPTOUT_TELEMETRY',
'JINA_K8S_ACCESS_MODES',
'JINA_K8S_STORAGE_CLASS_NAME',
'JINA_K8S_STORAGE_CAPACITY',
'JINA_STREAMER_ARGS',
)
__default_host__ = _os.environ.get(
'JINA_DEFAULT_HOST', '127.0.0.1' if __windows__ else '0.0.0.0'
)
__docker_host__ = 'host.docker.internal'
__default_executor__ = 'BaseExecutor'
__default_gateway__ = 'BaseGateway'
__default_http_gateway__ = 'HTTPGateway'
__default_composite_gateway__ = 'CompositeGateway'
__default_websocket_gateway__ = 'WebSocketGateway'
__default_grpc_gateway__ = 'GRPCGateway'
__default_endpoint__ = '/default'
__dynamic_base_gateway_hubble__ = 'jinaai+docker://jina-ai/JinaGateway:latest'
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unset_msg__ = '(unset)'
__args_executor_func__ = {
'docs',
'parameters',
'docs_matrix',
}
__args_executor_init__ = {'metas', 'requests', 'runtime_args'}
__resources_path__ = _os.path.join(
_os.path.dirname(_sys.modules['jina'].__file__), 'resources'
)
__cache_path__ = f'{_os.path.expanduser("~")}/.cache/{__package__}'
if not _Path(__cache_path__).exists():
_Path(__cache_path__).mkdir(parents=True, exist_ok=True)
_names_with_underscore = [
'__version__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__jina_env__',
'__uptime__',
'__default_endpoint__',
'__default_executor__',
'__unset_msg__',
'__windows__',
]
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend(_names_with_underscore)
RAFT_TO_EXECUTOR_PORT = 100
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_resnet_v2 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
from keras.src.applications.inception_resnet_v2 import decode_predictions
from keras.src.applications.inception_resnet_v2 import preprocess_input
|
"""Edenai Tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
EdenAiExplicitImageTool,
EdenAiObjectDetectionTool,
EdenAiParsingIDTool,
EdenAiParsingInvoiceTool,
EdenAiSpeechToTextTool,
EdenAiTextModerationTool,
EdenAiTextToSpeechTool,
EdenaiTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"EdenAiExplicitImageTool": "langchain_community.tools",
"EdenAiObjectDetectionTool": "langchain_community.tools",
"EdenAiParsingIDTool": "langchain_community.tools",
"EdenAiParsingInvoiceTool": "langchain_community.tools",
"EdenAiTextToSpeechTool": "langchain_community.tools",
"EdenAiSpeechToTextTool": "langchain_community.tools",
"EdenAiTextModerationTool": "langchain_community.tools",
"EdenaiTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
]
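# Hedged sketch of the pattern used above (module-level ``__getattr__``,
# PEP 562): a generic deprecation shim for a single moved symbol might look
# roughly like the following; the names are illustrative and not part of
# langchain.
#
#     import importlib
#     import warnings
#
#     _MOVED = {"OldTool": "new_package.tools"}
#
#     def __getattr__(name: str):
#         if name in _MOVED:
#             warnings.warn(
#                 f"Importing {name} from this module is deprecated; "
#                 f"import it from {_MOVED[name]} instead.",
#                 DeprecationWarning,
#                 stacklevel=2,
#             )
#             return getattr(importlib.import_module(_MOVED[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")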
|
"""Edenai Tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
EdenAiExplicitImageTool,
EdenAiObjectDetectionTool,
EdenAiParsingIDTool,
EdenAiParsingInvoiceTool,
EdenAiSpeechToTextTool,
EdenAiTextModerationTool,
EdenAiTextToSpeechTool,
EdenaiTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"EdenAiExplicitImageTool": "langchain_community.tools",
"EdenAiObjectDetectionTool": "langchain_community.tools",
"EdenAiParsingIDTool": "langchain_community.tools",
"EdenAiParsingInvoiceTool": "langchain_community.tools",
"EdenAiTextToSpeechTool": "langchain_community.tools",
"EdenAiSpeechToTextTool": "langchain_community.tools",
"EdenAiTextModerationTool": "langchain_community.tools",
"EdenaiTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiTextToSpeechTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenaiTool",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.pipelines import LoadAnnotations
class TestLoadAnnotations(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
seg_map = osp.join(data_prefix, 'gray.jpg')
self.results = {
'height':
300,
'width':
400,
'seg_map_path':
seg_map,
'instances': [{
'bbox': [0, 0, 10, 20],
'bbox_label': 1,
'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
'ignore_flag': 0
}, {
'bbox': [10, 10, 110, 120],
'bbox_label': 2,
'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
'ignore_flag': 0
}, {
'bbox': [50, 50, 60, 80],
'bbox_label': 2,
'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],
'ignore_flag': 1
}]
}
def test_load_bboxes(self):
transform = LoadAnnotations(
with_bbox=True,
with_label=False,
with_seg=False,
with_mask=False,
)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_bboxes', results)
self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 10, 20],
[10, 10, 110, 120],
[50, 50, 60,
80]])).all())
self.assertEqual(results['gt_bboxes'].dtype, np.float32)
self.assertTrue((results['gt_ignore_flags'] == np.array([0, 0,
1])).all())
self.assertEqual(results['gt_ignore_flags'].dtype, np.bool)
def test_load_denorm_bboxes(self):
transform = LoadAnnotations(
with_bbox=True,
with_label=False,
with_seg=False,
with_mask=False,
denorm_bbox=True)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_bboxes', results)
self.assertTrue(
(results['gt_bboxes'] == np.array([[0, 0, 4000, 6000],
[4000, 3000, 44000, 36000],
[20000, 15000, 24000,
24000]])).all())
self.assertEqual(results['gt_bboxes'].dtype, np.float32)
def test_load_labels(self):
transform = LoadAnnotations(
with_bbox=False,
with_label=True,
with_seg=False,
with_mask=False,
)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_bboxes_labels', results)
self.assertTrue((results['gt_bboxes_labels'] == np.array([1, 2,
2])).all())
self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)
def test_load_mask(self):
transform = LoadAnnotations(
with_bbox=False,
with_label=False,
with_seg=False,
with_mask=True,
poly2mask=False)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_masks', results)
self.assertEqual(len(results['gt_masks']), 3)
self.assertIsInstance(results['gt_masks'], PolygonMasks)
def test_load_mask_poly2mask(self):
transform = LoadAnnotations(
with_bbox=False,
with_label=False,
with_seg=False,
with_mask=True,
poly2mask=True)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_masks', results)
self.assertEqual(len(results['gt_masks']), 3)
self.assertIsInstance(results['gt_masks'], BitmapMasks)
def test_repr(self):
transform = LoadAnnotations(
with_bbox=True,
with_label=False,
with_seg=False,
with_mask=False,
)
self.assertEqual(
repr(transform), ('LoadAnnotations(with_bbox=True, '
'with_label=False, with_mask=False, '
'with_seg=False, poly2mask=True, '
"imdecode_backend='cv2', "
"file_client_args={'backend': 'disk'})"))
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.pipelines import LoadAnnotations
class TestLoadAnnotations(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
seg_map = osp.join(data_prefix, 'gray.jpg')
self.results = {
'height':
300,
'width':
400,
'seg_map_path':
seg_map,
'instances': [{
'bbox': [0, 0, 10, 20],
'bbox_label': 1,
'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
'ignore_flag': 0
}, {
'bbox': [10, 10, 110, 120],
'bbox_label': 2,
'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
'ignore_flag': 0
}, {
'bbox': [50, 50, 60, 80],
'bbox_label': 2,
'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],
'ignore_flag': 1
}]
}
def test_load_bboxes(self):
transform = LoadAnnotations(
with_bbox=True,
with_label=False,
with_seg=False,
with_mask=False,
)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_bboxes', results)
self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 10, 20],
[10, 10, 110, 120],
[50, 50, 60,
80]])).all())
self.assertEqual(results['gt_bboxes'].dtype, np.float32)
self.assertTrue((results['gt_ignore_flags'] == np.array([0, 0,
1])).all())
self.assertEqual(results['gt_ignore_flags'].dtype, np.bool)
def test_load_denorm_bboxes(self):
transform = LoadAnnotations(
with_bbox=True,
with_label=False,
with_seg=False,
with_mask=False,
denorm_bbox=True)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_bboxes', results)
self.assertTrue(
(results['gt_bboxes'] == np.array([[0, 0, 4000, 6000],
[4000, 3000, 44000, 36000],
[20000, 15000, 24000,
24000]])).all())
self.assertEqual(results['gt_bboxes'].dtype, np.float32)
def test_load_labels(self):
transform = LoadAnnotations(
with_bbox=False,
with_label=True,
with_seg=False,
with_mask=False,
)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_bboxes_labels', results)
self.assertTrue((results['gt_bboxes_labels'] == np.array([1, 2,
2])).all())
self.assertEqual(results['gt_bboxes_labels'].dtype, np.int32)
def test_load_mask(self):
transform = LoadAnnotations(
with_bbox=False,
with_label=False,
with_seg=False,
with_mask=True,
poly2mask=False)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_masks', results)
self.assertEqual(len(results['gt_masks']), 3)
self.assertIsInstance(results['gt_masks'], PolygonMasks)
def test_load_mask_poly2mask(self):
transform = LoadAnnotations(
with_bbox=False,
with_label=False,
with_seg=False,
with_mask=True,
poly2mask=True)
results = transform(copy.deepcopy(self.results))
self.assertIn('gt_masks', results)
self.assertEqual(len(results['gt_masks']), 3)
self.assertIsInstance(results['gt_masks'], BitmapMasks)
def test_repr(self):
transform = LoadAnnotations(
with_bbox=True,
with_label=False,
with_seg=False,
with_mask=False,
)
self.assertEqual(
repr(transform), ('LoadAnnotations(with_bbox=True, '
'with_label=False, with_mask=False, '
'with_seg=False, poly2mask=True, '
"imdecode_backend='cv2', "
"file_client_args={'backend': 'disk'})"))
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
runner.param_schedulers = [scheduler]
hook.after_train_iter(runner, 0)
scheduler.step.assert_called()
def test_after_epoch(self):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
runner.param_schedulers = [scheduler]
hook.after_train_epoch(runner)
scheduler.step.assert_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
Runner.param_schedulers = [scheduler]
Hook.after_train_iter(Runner)
scheduler.step.assert_called()
def test_after_epoch(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
Runner.param_schedulers = [scheduler]
Hook.after_train_epoch(Runner)
scheduler.step.assert_called()
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings_decoded,
corpus_index=corpus_index,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the MSE loss between the computed query-passage score and a target query-passage score. This loss
is used to distill a cross-encoder model from a teacher cross-encoder model or gold labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.MSELoss`.
.. note::
Be mindful of the magnitude of both the labels and what the model produces. If the teacher model produces
logits with Sigmoid to bound them to [0, 1], then you may wish to use a Sigmoid activation function in the loss.
References:
- Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation: https://arxiv.org/abs/2010.02666
Requirements:
1. Your model must be initialized with `num_labels = 1` (a.k.a. the default) to predict one class.
2. Usually uses a finetuned CrossEncoder teacher M in a knowledge distillation setup.
Inputs:
+-----------------------------------------+-----------------------------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=========================================+=============================+===============================+
| (sentence_A, sentence_B) pairs | similarity score | 1 |
+-----------------------------------------+-----------------------------+-------------------------------+
Relations:
- :class:`MarginMSELoss` is similar to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
student_model = CrossEncoder("microsoft/mpnet-base")
teacher_model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-12-v2")
train_dataset = Dataset.from_dict({
"query": ["What are pandas?", "What is the capital of France?"],
"answer": ["Pandas are a kind of bear.", "The capital of France is Paris."],
})
def compute_labels(batch):
return {
"label": teacher_model.predict(list(zip(batch["query"], batch["answer"])))
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = CrossEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.loss_fct = nn.MSELoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, **kwargs) -> None:
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss(**kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
|
import pytest
import inspect
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.nvidia import NVIDIAEmbedding
from openai import AuthenticationError
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_integration_api(httpx_mock: HTTPXMock):
BASE_URL = "https://integrate.api.nvidia.com/v1"
mock_response = {"object": "list", "data": [{"index": 0, "embedding": ""}]}
httpx_mock.add_response(
method="POST",
url=f"{BASE_URL}/embeddings",
json=mock_response,
headers={"Content-Type": "application/json"},
status_code=200,
)
def test_embedding_class():
emb = NVIDIAEmbedding(api_key="BOGUS")
assert isinstance(emb, BaseEmbedding)
def test_nvidia_embedding_param_setting():
emb = NVIDIAEmbedding(
api_key="BOGUS",
model="NV-Embed-QA",
truncate="END",
timeout=20,
max_retries=10,
embed_batch_size=15,
)
assert emb.model == "NV-Embed-QA"
assert emb.truncate == "END"
assert emb._client.timeout == 20
assert emb._client.max_retries == 10
assert emb._aclient.timeout == 20
assert emb._aclient.max_retries == 10
assert emb.embed_batch_size == 15
def test_nvidia_embedding_throws_on_batches_larger_than_259():
with pytest.raises(ValueError):
NVIDIAEmbedding(embed_batch_size=300)
def test_nvidia_embedding_async():
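    # Only verify that the async APIs return awaitables; the coroutines are
    # closed without being awaited, so no requests are actually sent.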
emb = NVIDIAEmbedding(api_key="BOGUS")
assert inspect.iscoroutinefunction(emb._aget_query_embedding)
query_emb = emb._aget_query_embedding("hi")
assert inspect.isawaitable(query_emb)
query_emb.close()
assert inspect.iscoroutinefunction(emb._aget_text_embedding)
text_emb = emb._aget_text_embedding("hi")
assert inspect.isawaitable(text_emb)
text_emb.close()
assert inspect.iscoroutinefunction(emb._aget_text_embeddings)
text_embs = emb._aget_text_embeddings(["hi", "hello"])
assert inspect.isawaitable(text_embs)
text_embs.close()
def test_nvidia_embedding_callback(mock_integration_api):
llama_debug = LlamaDebugHandler(print_trace_on_end=False)
assert len(llama_debug.get_events()) == 0
callback_manager = CallbackManager([llama_debug])
emb = NVIDIAEmbedding(api_key="dummy", callback_manager=callback_manager)
try:
emb.get_text_embedding("hi")
except AuthenticationError:
pass
assert len(llama_debug.get_events(CBEventType.EMBEDDING)) > 0
def test_nvidia_embedding_throws_with_invalid_key(mock_integration_api):
emb = NVIDIAEmbedding(api_key="invalid")
emb.get_text_embedding("hi")
# @pytest.mark.parametrize("model", list(MODEL_ENDPOINT_MAP.keys()))
# def test_model_compatible_client_model(model: str) -> None:
# NVIDIAEmbedding(api_key="BOGUS", model=model)
# marked as xfail since an invalid-model error is no longer raised
@pytest.mark.xfail(reason="value error is not raised anymore")
def test_model_incompatible_client_model() -> None:
model_name = "x"
err_msg = (
f"Model {model_name} is incompatible with client NVIDIAEmbedding. "
f"Please check `NVIDIAEmbedding.available_models`."
)
with pytest.raises(ValueError) as msg:
NVIDIAEmbedding(api_key="BOGUS", model=model_name)
assert err_msg == str(msg.value)
def test_model_incompatible_client_known_model() -> None:
model_name = "google/deplot"
warn_msg = f"Unable to determine validity"
with pytest.warns(UserWarning) as msg:
NVIDIAEmbedding(api_key="BOGUS", model=model_name)
assert warn_msg in str(msg[0].message)
|
import pytest
import inspect
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.nvidia import NVIDIAEmbedding
from openai import AuthenticationError
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_integration_api(httpx_mock: HTTPXMock):
BASE_URL = "https://integrate.api.nvidia.com/v1"
mock_response = {"object": "list", "data": [{"index": 0, "embedding": ""}]}
httpx_mock.add_response(
method="POST",
url=f"{BASE_URL}/embeddings",
json=mock_response,
headers={"Content-Type": "application/json"},
status_code=200,
)
def test_embedding_class():
emb = NVIDIAEmbedding(api_key="BOGUS")
assert isinstance(emb, BaseEmbedding)
def test_nvidia_embedding_param_setting():
emb = NVIDIAEmbedding(
api_key="BOGUS",
model="NV-Embed-QA",
truncate="END",
timeout=20,
max_retries=10,
embed_batch_size=15,
)
assert emb.model == "NV-Embed-QA"
assert emb.truncate == "END"
assert emb._client.timeout == 20
assert emb._client.max_retries == 10
assert emb._aclient.timeout == 20
assert emb._aclient.max_retries == 10
assert emb.embed_batch_size == 15
def test_nvidia_embedding_throws_on_batches_larger_than_259():
with pytest.raises(ValueError):
NVIDIAEmbedding(embed_batch_size=300)
def test_nvidia_embedding_async():
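    # Only verify that the async APIs return awaitables; the coroutines are
    # closed without being awaited, so no requests are actually sent.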
emb = NVIDIAEmbedding(api_key="BOGUS")
assert inspect.iscoroutinefunction(emb._aget_query_embedding)
query_emb = emb._aget_query_embedding("hi")
assert inspect.isawaitable(query_emb)
query_emb.close()
assert inspect.iscoroutinefunction(emb._aget_text_embedding)
text_emb = emb._aget_text_embedding("hi")
assert inspect.isawaitable(text_emb)
text_emb.close()
assert inspect.iscoroutinefunction(emb._aget_text_embeddings)
text_embs = emb._aget_text_embeddings(["hi", "hello"])
assert inspect.isawaitable(text_embs)
text_embs.close()
def test_nvidia_embedding_callback(mock_integration_api):
llama_debug = LlamaDebugHandler(print_trace_on_end=False)
assert len(llama_debug.get_events()) == 0
callback_manager = CallbackManager([llama_debug])
emb = NVIDIAEmbedding(api_key="dummy", callback_manager=callback_manager)
try:
emb.get_text_embedding("hi")
except AuthenticationError:
pass
assert len(llama_debug.get_events(CBEventType.EMBEDDING)) > 0
def test_nvidia_embedding_throws_with_invalid_key(mock_integration_api):
emb = NVIDIAEmbedding(api_key="invalid")
emb.get_text_embedding("hi")
# @pytest.mark.parametrize("model", list(MODEL_ENDPOINT_MAP.keys()))
# def test_model_compatible_client_model(model: str) -> None:
# NVIDIAEmbedding(api_key="BOGUS", model=model)
def test_model_incompatible_client_model() -> None:
model_name = "x"
err_msg = (
f"Model {model_name} is incompatible with client NVIDIAEmbedding. "
f"Please check `NVIDIAEmbedding.available_models()`."
)
with pytest.raises(ValueError) as msg:
NVIDIAEmbedding(api_key="BOGUS", model=model_name)
assert err_msg == str(msg.value)
def test_model_incompatible_client_known_model() -> None:
model_name = "google/deplot"
warn_msg = f"Unable to determine validity"
with pytest.warns(UserWarning) as msg:
NVIDIAEmbedding(api_key="BOGUS", model=model_name)
assert len(msg) == 1
assert warn_msg in str(msg[0].message)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'DefaultScope'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
from langchain_core.prompts.prompt import PromptTemplate
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
ENTITY_SUMMARIZATION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
SUMMARY_PROMPT,
)
DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI:""" # noqa: E501
PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)
# Only for backwards compatibility
__all__ = [
"ENTITY_EXTRACTION_PROMPT",
"ENTITY_MEMORY_CONVERSATION_TEMPLATE",
"ENTITY_SUMMARIZATION_PROMPT",
"KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
"PROMPT",
"SUMMARY_PROMPT",
]
|
# flake8: noqa
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
ENTITY_SUMMARIZATION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
SUMMARY_PROMPT,
)
from langchain_core.prompts.prompt import PromptTemplate
DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)
# Only for backwards compatibility
__all__ = [
"SUMMARY_PROMPT",
"ENTITY_MEMORY_CONVERSATION_TEMPLATE",
"ENTITY_SUMMARIZATION_PROMPT",
"ENTITY_EXTRACTION_PROMPT",
"KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
"PROMPT",
]
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["modeling_latent_upsampler"] = ["LTXLatentUpsamplerModel"]
_import_structure["pipeline_ltx"] = ["LTXPipeline"]
_import_structure["pipeline_ltx_condition"] = ["LTXConditionPipeline"]
_import_structure["pipeline_ltx_image2video"] = ["LTXImageToVideoPipeline"]
_import_structure["pipeline_ltx_latent_upsample"] = ["LTXLatentUpsamplePipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .modeling_latent_upsampler import LTXLatentUpsamplerModel
from .pipeline_ltx import LTXPipeline
from .pipeline_ltx_condition import LTXConditionPipeline
from .pipeline_ltx_image2video import LTXImageToVideoPipeline
from .pipeline_ltx_latent_upsample import LTXLatentUpsamplePipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_ltx"] = ["LTXPipeline"]
_import_structure["pipeline_ltx_condition"] = ["LTXConditionPipeline"]
_import_structure["pipeline_ltx_image2video"] = ["LTXImageToVideoPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_ltx import LTXPipeline
from .pipeline_ltx_condition import LTXConditionPipeline
from .pipeline_ltx_image2video import LTXImageToVideoPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .musdb_hq import MUSDB_HQ
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .voxceleb1 import VoxCeleb1Identification, VoxCeleb1Verification
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"LibriLightLimited",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
"MUSDB_HQ",
"FluentSpeechCommands",
"VoxCeleb1Identification",
"VoxCeleb1Verification",
]
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .musdb_hq import MUSDB_HQ
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"LibriLightLimited",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
"MUSDB_HQ",
"FluentSpeechCommands",
]
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
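    # Demultiplexer routing for the training archive: 0 -> .ppm image files,
    # 1 -> .csv annotation files, None -> everything else (dropped via drop_none=True).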
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_boxes = BoundingBoxes(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_boxes": bounding_boxes,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
            # The order of the image files in the .zip archives perfectly matches the order of the entries in
            # the (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBoxes
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
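    # Demultiplexer routing for the training archive: 0 -> .ppm image files,
    # 1 -> .csv annotation files, None -> everything else (dropped via drop_none=True).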
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_boxes = BoundingBoxes(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_boxes": bounding_boxes,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
            # The order of the image files in the .zip archives perfectly matches the order of the entries in
            # the (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
        # TODO: Update this docstring, as the training it mentions has not been applied to sparse models
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../../examples/sentence_transformer/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SparseEncoder
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../../examples/sentence_transformer/training/distillation/README.html>`_
- `Training > Multilingual Models <../../../examples/sentence_transformer/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`SparseMarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
}
)
def compute_labels(batch):
return {"label": teacher_model.encode(batch["english"], convert_to_sparse_tensor=False)}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.SparseMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model)
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
        # TODO: Update this docstring, as the training it mentions has not been applied to sparse models
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../../examples/sentence_transformer/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SparseEncoder
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../../examples/sentence_transformer/training/distillation/README.html>`_
- `Training > Multilingual Models <../../../examples/sentence_transformer/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`SparseMarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
}
)
def compute_labels(batch):
return {"label": teacher_model.encode(batch["english"], convert_to_sparse_tensor=False)}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.SparseMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model)
|
"""
Arize-Phoenix LlamaPack.
"""
from typing import TYPE_CHECKING, Any, Dict, List
from llama_index.core import set_global_handler
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
if TYPE_CHECKING:
from phoenix import Session as PhoenixSession
class ArizePhoenixQueryEnginePack(BaseLlamaPack):
"""
    The Arize-Phoenix LlamaPack shows how to instrument your LlamaIndex query
engine with tracing. It launches Phoenix in the background, builds an index
over an input list of nodes, and instantiates and instruments a query engine
over that index so that trace data from each query is sent to Phoenix.
Note: Using this LlamaPack requires that your OpenAI API key is set via the
OPENAI_API_KEY environment variable.
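    Example (a minimal sketch, assuming `arize-phoenix` is installed and OPENAI_API_KEY is set;
    the node text and the query below are only illustrative):
    ```python
    from llama_index.core.schema import TextNode
    nodes = [TextNode(text="Phoenix collects traces from instrumented LlamaIndex query engines.")]
    pack = ArizePhoenixQueryEnginePack(nodes)
    response = pack.run("What does Phoenix collect?")
    print(response)
    ```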
"""
def __init__(
self,
nodes: List[TextNode],
**kwargs: Any,
) -> None:
"""
Initializes a new instance of ArizePhoenixQueryEnginePack.
Args:
nodes (List[TextNode]): An input list of nodes over which the index
will be built.
"""
try:
import phoenix as px
except ImportError:
raise ImportError(
"The arize-phoenix package could not be found. "
"Please install with `pip install arize-phoenix`."
)
self._session: "PhoenixSession" = px.launch_app()
set_global_handler("arize_phoenix")
self._index = VectorStoreIndex(nodes, **kwargs)
self._query_engine = self._index.as_query_engine()
def get_modules(self) -> Dict[str, Any]:
"""
Returns a dictionary containing the internals of the LlamaPack.
Returns:
Dict[str, Any]: A dictionary containing the internals of the
LlamaPack.
"""
return {
"session": self._session,
"session_url": self._session.url,
"index": self._index,
"query_engine": self._query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""
Runs queries against the index.
Returns:
Any: A response from the query engine.
"""
return self._query_engine.query(*args, **kwargs)
|
"""
Arize-Phoenix LlamaPack.
"""
from typing import TYPE_CHECKING, Any, Dict, List
from llama_index.core import set_global_handler
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
if TYPE_CHECKING:
from phoenix import Session as PhoenixSession
class ArizePhoenixQueryEnginePack(BaseLlamaPack):
"""
    The Arize-Phoenix LlamaPack shows how to instrument your LlamaIndex query
engine with tracing. It launches Phoenix in the background, builds an index
over an input list of nodes, and instantiates and instruments a query engine
over that index so that trace data from each query is sent to Phoenix.
Note: Using this LlamaPack requires that your OpenAI API key is set via the
OPENAI_API_KEY environment variable.
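    Example (a minimal sketch, assuming `arize-phoenix` is installed and OPENAI_API_KEY is set;
    the node text and the query below are only illustrative):
    ```python
    from llama_index.core.schema import TextNode
    nodes = [TextNode(text="Phoenix collects traces from instrumented LlamaIndex query engines.")]
    pack = ArizePhoenixQueryEnginePack(nodes)
    response = pack.run("What does Phoenix collect?")
    print(response)
    ```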
"""
def __init__(
self,
nodes: List[TextNode],
**kwargs: Any,
) -> None:
"""
Initializes a new instance of ArizePhoenixQueryEnginePack.
Args:
nodes (List[TextNode]): An input list of nodes over which the index
will be built.
"""
try:
import phoenix as px
except ImportError:
raise ImportError(
"The arize-phoenix package could not be found. "
"Please install with `pip install arize-phoenix`."
)
self._session: "PhoenixSession" = px.launch_app()
set_global_handler("arize_phoenix")
self._index = VectorStoreIndex(nodes, **kwargs)
self._query_engine = self._index.as_query_engine()
def get_modules(self) -> Dict[str, Any]:
"""
Returns a dictionary containing the internals of the LlamaPack.
Returns:
Dict[str, Any]: A dictionary containing the internals of the
LlamaPack.
"""
return {
"session": self._session,
"session_url": self._session.url,
"index": self._index,
"query_engine": self._query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""
Runs queries against the index.
Returns:
Any: A response from the query engine.
"""
return self._query_engine.query(*args, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
                # which means w, h should be computed as `x2 - x1 + 1` and
                # `y2 - y1 + 1`
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
                # which means w, h should be computed as `x2 - x1 + 1` and
                # `y2 - y1 + 1`
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
"""Init file of LlamaIndex."""
__version__ = "0.12.12"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.11"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. in __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
if steps_per_epoch:
self._current_iterator = None
self._insufficient_data = False
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def enumerate_epoch(self):
buffer = []
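        # Batches are buffered and yielded in groups of `steps_per_execution`;
        # each yield emits the step index of the first batch in the buffer together with the buffer.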
self.data_adapter.on_epoch_begin()
if self.steps_per_epoch:
if self._current_iterator is None:
self._current_iterator = iter(self._get_iterator())
self._insufficient_data = False
for step in range(self.steps_per_epoch):
if self._insufficient_data:
break
try:
data = next(self._current_iterator)
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
except (StopIteration,):
warnings.warn(
"Your input ran out of data; interrupting epoch. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
self._current_iterator = None
self._insufficient_data = True
if buffer:
yield step - len(buffer) + 1, buffer
else:
for step, data in enumerate(self._get_iterator()):
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
if buffer:
yield step - len(buffer) + 1, buffer
if not self._num_batches:
# Infer the number of batches returned by the data_adapter.
# Assumed static.
self._num_batches = step + 1
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. in __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
if steps_per_epoch:
self._current_iterator = None
self._insufficient_data = False
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def enumerate_epoch(self):
buffer = []
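        # Batches are buffered and yielded in groups of `steps_per_execution`;
        # each yield emits the step index of the first batch in the buffer together with the buffer.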
if self.steps_per_epoch:
if self._current_iterator is None:
self._current_iterator = iter(self._get_iterator())
self._insufficient_data = False
for step in range(self.steps_per_epoch):
if self._insufficient_data:
break
try:
data = next(self._current_iterator)
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
except (StopIteration,):
warnings.warn(
"Your input ran out of data; interrupting epoch. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
self._current_iterator = None
self._insufficient_data = True
if buffer:
yield step - len(buffer) + 1, buffer
else:
for step, data in enumerate(self._get_iterator()):
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
if buffer:
yield step - len(buffer) + 1, buffer
if not self._num_batches:
# Infer the number of batches returned by the data_adapter.
# Assumed static.
self._num_batches = step + 1
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Cerebras(OpenAILike):
"""
Cerebras LLM.
Examples:
`pip install llama-index-llms-cerebras`
```python
from llama_index.llms.cerebras import Cerebras
# Set up the Cerebras class with the required model and API key
llm = Cerebras(model="llama-3.3-70b", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Why is fast inference important?")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = os.environ.get("CEREBRAS_BASE_URL", None)
or "https://api.cerebras.ai/v1/",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("CEREBRAS_API_KEY", None)
assert api_key is not None, (
"API Key not specified! Please set `CEREBRAS_API_KEY`!"
)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Cerebras"
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Cerebras(OpenAILike):
"""
Cerebras LLM.
Examples:
`pip install llama-index-llms-cerebras`
```python
from llama_index.llms.cerebras import Cerebras
# Set up the Cerebras class with the required model and API key
llm = Cerebras(model="llama-3.3-70b", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Why is fast inference important?")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = os.environ.get("CEREBRAS_BASE_URL", None)
or "https://api.cerebras.ai/v1/",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("CEREBRAS_API_KEY", None)
assert (
api_key is not None
), "API Key not specified! Please set `CEREBRAS_API_KEY`!"
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Cerebras"
|
from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio
async def test_stream_no_context():
h = WorkflowHandler()
with pytest.raises(ValueError, match="Context is not set!"):
async for ev in h.stream_events():
pass
@pytest.mark.asyncio
async def test_run_step_no_context():
h = WorkflowHandler()
with pytest.raises(
ValueError,
match="Context must be set to run a workflow step-wise!",
):
await h.run_step()
@pytest.mark.asyncio
async def test_run_step_no_stepwise():
ctx = mock.MagicMock(spec=Context, stepwise=False)
h = WorkflowHandler(ctx=ctx)
with pytest.raises(
ValueError,
match="Workflow must be created passing stepwise=True to call this method.",
):
await h.run_step()
|
from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio()
async def test_stream_no_context():
h = WorkflowHandler()
with pytest.raises(ValueError, match="Context is not set!"):
async for ev in h.stream_events():
pass
@pytest.mark.asyncio()
async def test_run_step_no_context():
h = WorkflowHandler()
with pytest.raises(
ValueError,
match="Context must be set to run a workflow step-wise!",
):
await h.run_step()
@pytest.mark.asyncio()
async def test_run_step_no_stepwise():
ctx = mock.MagicMock(spec=Context, stepwise=False)
h = WorkflowHandler(ctx=ctx)
with pytest.raises(
ValueError,
match="Workflow must be created passing stepwise=True to call this method.",
):
await h.run_step()
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approximate maximum size of the corpus. Pass 100 as the second parameter and the corpus will contain roughly 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info(f"Queries: {len(dev_queries)}")
logging.info(f"Corpus: {len(corpus)}")
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approx. max size of the corpus. Pass 100 as the second parameter and the corpus will contain approx. 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
import numpy as np
from docarray import Document, DocumentArray, dataclass
from docarray.typing import Text
from jina import Executor, Flow, requests
def test_specific_params():
class MyExec(Executor):
def __init__(self, params_awaited, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params_awaited = params_awaited
@requests
def process(self, docs, parameters, **kwargs):
for doc in docs:
doc.tags['assert'] = parameters == self.params_awaited
flow = (
Flow()
.add(uses=MyExec, name='exec1', uses_with={'params_awaited': {'key_1': True}})
.add(
uses=MyExec,
name='exec2',
uses_with={'params_awaited': {'key_1': True, 'key_2': False}},
)
)
with flow:
docs = flow.index(
DocumentArray.empty(size=1),
parameters={'key_1': True, 'exec2__key_2': False},
)
assert docs[0].tags['assert']
def test_specific_params_with_branched_flow():
class TextEncoderTestSpecific(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = lambda t: np.random.rand(
len(t), 128
) # initialize dummy text embedding model
@requests(on='/encode')
def encode_text(self, docs, parameters, **kwargs):
path = parameters.get('access_path', None)
text_docs = docs[path]
embeddings = self.model(text_docs[:, 'text'])
text_docs.embeddings = embeddings
class EmbeddingCombinerTestSpecific(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = lambda emb1, emb2: np.concatenate(
[emb1, emb2], axis=1
) # initialize dummy model to combine embeddings
@requests(on='/encode')
def combine(self, docs, parameters, **kwargs):
text1_path = parameters.get('text1_access_path', None)
text2_path = parameters.get('text2_access_path', None)
assert text1_path == '@.[text1]'
assert text2_path == '@.[text2]'
text1_docs = docs[text1_path]
text2_docs = docs[text2_path]
combined_embeddings = self.model(
text1_docs.embeddings, text2_docs.embeddings
)
docs.embeddings = combined_embeddings
@dataclass
class MMDoc:
text1: Text
text2: Text
mmdoc_dataclass = MMDoc(text1='text 1', text2='text 2')
da = DocumentArray([Document(mmdoc_dataclass)])
f = (
Flow()
.add(uses=TextEncoderTestSpecific, name='Text1Encoder')
.add(uses=TextEncoderTestSpecific, name='Text2Encoder', needs='gateway')
.add(
uses=EmbeddingCombinerTestSpecific,
name='Combiner',
needs=['Text1Encoder', 'Text2Encoder'],
)
)
with f:
da = f.post(
inputs=da,
on='/encode',
parameters={
'Text1Encoder__access_path': '@.[text1]',
'Text2Encoder__access_path': '@.[text2]',
'Combiner__text1_access_path': '@.[text1]',
'Combiner__text2_access_path': '@.[text2]',
},
)
assert len(da) == 1
for d in da:
assert d.embedding.shape == (256,)
|
import numpy as np
from docarray import DocumentArray, Document, dataclass
from docarray.typing import Text
from jina import Executor, Flow, requests
def test_specific_params():
class MyExec(Executor):
def __init__(self, params_awaited, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params_awaited = params_awaited
@requests
def process(self, docs, parameters, **kwargs):
for doc in docs:
doc.tags['assert'] = parameters == self.params_awaited
flow = (
Flow()
.add(uses=MyExec, name='exec1', uses_with={'params_awaited': {'key_1': True}})
.add(
uses=MyExec,
name='exec2',
uses_with={'params_awaited': {'key_1': True, 'key_2': False}},
)
)
with flow:
docs = flow.index(
DocumentArray.empty(size=1),
parameters={'key_1': True, 'exec2__key_2': False},
)
assert docs[0].tags['assert']
def test_specific_params_with_branched_flow():
class TextEncoderTestSpecific(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = lambda t: np.random.rand(
len(t), 128
) # initialize dummy text embedding model
@requests(on='/encode')
def encode_text(self, docs, parameters, **kwargs):
path = parameters.get('access_path', None)
text_docs = docs[path]
embeddings = self.model(text_docs[:, 'text'])
text_docs.embeddings = embeddings
class EmbeddingCombinerTestSpecific(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = lambda emb1, emb2: np.concatenate(
[emb1, emb2], axis=1
) # initialize dummy model to combine embeddings
@requests(on='/encode')
def combine(self, docs, parameters, **kwargs):
text1_path = parameters.get('text1_access_path', None)
text2_path = parameters.get('text2_access_path', None)
assert text1_path == '@.[text1]'
assert text2_path == '@.[text2]'
text1_docs = docs[text1_path]
text2_docs = docs[text2_path]
combined_embeddings = self.model(text1_docs.embeddings, text2_docs.embeddings)
docs.embeddings = combined_embeddings
@dataclass
class MMDoc:
text1: Text
text2: Text
mmdoc_dataclass = MMDoc(text1='text 1', text2='text 2')
da = DocumentArray([Document(mmdoc_dataclass)])
f = (
Flow()
.add(uses=TextEncoderTestSpecific, name='Text1Encoder')
.add(uses=TextEncoderTestSpecific, name='Text2Encoder', needs='gateway')
.add(uses=EmbeddingCombinerTestSpecific, name='Combiner', needs=['Text1Encoder', 'Text2Encoder'])
)
with f:
da = f.post(
inputs=da,
on='/encode',
parameters={
'Text1Encoder__access_path': '@.[text1]',
'Text2Encoder__access_path': '@.[text2]',
'Combiner__text1_access_path': '@.[text1]',
'Combiner__text2_access_path': '@.[text2]',
},
)
assert len(da) == 1
for d in da:
assert d.embedding.shape == (256, )
|
from typing import Union
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor as AudioTFTensor,
)
AudioTensor = AudioNdArray
if tf_available and torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor, AudioTFTensor] # type: ignore
elif tf_available:
AudioTensor = Union[AudioNdArray, AudioTFTensor] # type: ignore
elif torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor] # type: ignore
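# Net effect: AudioTensor resolves to plain AudioNdArray when neither backend is installed,
# and widens to the corresponding Union above when torch and/or tensorflow are available.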
|
from typing import Union
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
try:
import torch # noqa: F401
except ImportError:
AudioTensor = AudioNdArray
else:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
AudioTensor = Union[AudioNdArray, AudioTorchTensor] # type: ignore
|
import logging
from typing import Any, Dict, Optional, Tuple
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_UPSTAGE_API_BASE = "https://api.upstage.ai/v1/solar"
DEFAULT_CONTEXT_WINDOW = 32768
CHAT_MODELS = {
"solar-mini": 32768,
"solar-pro": 4096,
}
FUNCTION_CALLING_MODELS = ["solar-mini"]
DOC_PARSING_MODELS = ["solar-pro"]
ALL_AVAILABLE_MODELS = {**CHAT_MODELS}
SOLAR_TOKENIZERS = {
"solar-pro": "upstage/solar-pro-tokenizer",
"solar-mini": "upstage/solar-1-mini-tokenizer",
}
logger = logging.getLogger(__name__)
def resolve_upstage_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> Tuple[Optional[str], str]:
"""Resolve Upstage credentials.
The order of precedence is:
1. param
2. env
    3. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "UPSTAGE_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "UPSTAGE_API_BASE", "")
final_api_key = api_key or ""
final_api_base = api_base or DEFAULT_UPSTAGE_API_BASE
return final_api_key, str(final_api_base)
def is_chat_model(model: str) -> bool:
return True
def is_function_calling_model(model: str) -> bool:
return model in FUNCTION_CALLING_MODELS
def is_doc_parsing_model(model: str, kwargs: Dict[str, Any]) -> bool:
if "file_path" in kwargs:
if model in DOC_PARSING_MODELS:
return True
raise ValueError("file_path is not supported for this model.")
return False
def upstage_modelname_to_contextsize(modelname: str) -> int:
if modelname not in ALL_AVAILABLE_MODELS:
return DEFAULT_CONTEXT_WINDOW
return CHAT_MODELS[modelname]
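# Quick sanity-check sketch based on the tables defined above:
# upstage_modelname_to_contextsize("solar-pro")     -> 4096
# upstage_modelname_to_contextsize("unknown-model") -> DEFAULT_CONTEXT_WINDOW (32768)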
|
import logging
from typing import Any, Dict, Optional, Tuple
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_UPSTAGE_API_BASE = "https://api.upstage.ai/v1/solar"
DEFAULT_CONTEXT_WINDOW = 32768
CHAT_MODELS = {
"solar-1-mini-chat": 32768,
"solar-pro": 4096,
"solar-docvision": 65536,
}
FUNCTION_CALLING_MODELS = ["solar-1-mini-chat"]
DOC_PARSING_MODELS = ["solar-pro"]
ALL_AVAILABLE_MODELS = {**CHAT_MODELS}
SOLAR_TOKENIZERS = {
"solar-pro": "upstage/solar-pro-tokenizer",
"solar-1-mini-chat": "upstage/solar-1-mini-tokenizer",
"solar-docvision": "upstage/solar-docvision-preview-tokenizer",
}
logger = logging.getLogger(__name__)
def resolve_upstage_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> Tuple[Optional[str], str]:
"""Resolve Upstage credentials.
The order of precedence is:
1. param
2. env
    3. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "UPSTAGE_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "UPSTAGE_API_BASE", "")
final_api_key = api_key or ""
final_api_base = api_base or DEFAULT_UPSTAGE_API_BASE
return final_api_key, str(final_api_base)
def is_chat_model(model: str) -> bool:
return True
def is_function_calling_model(model: str) -> bool:
return model in FUNCTION_CALLING_MODELS
def is_doc_parsing_model(model: str, kwargs: Dict[str, Any]) -> bool:
if "file_path" in kwargs:
if model in DOC_PARSING_MODELS:
return True
raise ValueError("file_path is not supported for this model.")
return False
def upstage_modelname_to_contextsize(modelname: str) -> int:
if modelname not in ALL_AVAILABLE_MODELS:
return DEFAULT_CONTEXT_WINDOW
return CHAT_MODELS[modelname]
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Query Sparsity: Active Dimensions: 63.0, Sparsity Ratio: 0.9979
Model Corpus Sparsity: Active Dimensions: 63.4, Sparsity Ratio: 0.9979
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 48.1, Sparsity Ratio: 0.9984
Model Corpus Sparsity: Active Dimensions: 125.4, Sparsity Ratio: 0.9959
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Query Sparsity: Active Dimensions: 55.5, Sparsity Ratio: 0.9982
Model Corpus Sparsity: Active Dimensions: 94.4, Sparsity Ratio: 0.9969
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Sparsity Stats Query : Row Non-Zero Mean: 62.97999954223633, Row Sparsity Mean: 0.9979365468025208
Model Sparsity Stats Corpus : Row Non-Zero Mean: 63.39932632446289, Row Sparsity Mean: 0.9979228377342224
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Sparsity Stats Query : Row Non-Zero Mean: 48.08000183105469, Row Sparsity Mean: 0.9984247088432312
Model Sparsity Stats Corpus : Row Non-Zero Mean: 125.3604965209961, Row Sparsity Mean: 0.9958928227424622
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Sparsity Stats Query : Row Non-Zero Mean: 55.53000068664551, Row Sparsity Mean: 0.998180627822876
Model Sparsity Stats Corpus : Row Non-Zero Mean: 94.37991142272949, Row Sparsity Mean: 0.9969078302383423
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
from langchain_core.exceptions import TracerException
from langchain_core.tracers.base import BaseTracer
__all__ = ["TracerException", "BaseTracer"]
|
from langchain_core.tracers.base import BaseTracer, TracerException
__all__ = ["TracerException", "BaseTracer"]
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Query info: num_rows: 50, num_cols: 30522, row_non_zero_mean: 62.97999954223633, row_sparsity_mean: 0.9979365468025208 1/1 [00:04<00:00, 4.12s/it]
Corpus info: num_rows: 5046, num_cols: 30522, row_non_zero_mean: 63.394371032714844, row_sparsity_mean: 0.9979230165481567
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Evaluating NanoMSMARCO
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Query info: num_rows: 50, num_cols: 30522, row_non_zero_mean: 48.099998474121094, row_sparsity_mean: 0.9984239935874939 1/1 [00:19<00:00, 19.40s/it]
Corpus info: num_rows: 5043, num_cols: 30522, row_non_zero_mean: 125.38131713867188, row_sparsity_mean: 0.9958921670913696
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Average Querie: num_rows: 50.0, num_cols: 30522.0, row_non_zero_mean: 55.53999900817871, row_sparsity_mean: 0.9981802701950073
Average Corpus: num_rows: 5044.5, num_cols: 30522.0, row_non_zero_mean: 94.38784408569336, row_sparsity_mean: 0.9969075918197632
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
import logging
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseNanoBEIREvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Query info: num_rows: 50, num_cols: 30522, row_non_zero_mean: 62.97999954223633, row_sparsity_mean: 0.9979365468025208 1/1 [00:04<00:00, 4.12s/it]
Corpus info: num_rows: 5046, num_cols: 30522, row_non_zero_mean: 63.394371032714844, row_sparsity_mean: 0.9979230165481567
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Evaluating NanoMSMARCO
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Query info: num_rows: 50, num_cols: 30522, row_non_zero_mean: 48.099998474121094, row_sparsity_mean: 0.9984239935874939 1/1 [00:19<00:00, 19.40s/it]
Corpus info: num_rows: 5043, num_cols: 30522, row_non_zero_mean: 125.38131713867188, row_sparsity_mean: 0.9958921670913696
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Average Querie: num_rows: 50.0, num_cols: 30522.0, row_non_zero_mean: 55.53999900817871, row_sparsity_mean: 0.9981802701950073
Average Corpus: num_rows: 5044.5, num_cols: 30522.0, row_non_zero_mean: 94.38784408569336, row_sparsity_mean: 0.9969075918197632
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
import platform
from argparse import ArgumentParser
import huggingface_hub
import pandas
import pyarrow
from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env", help="Print relevant system environment info.")
download_parser.set_defaults(func=info_command_factory)
def run(self):
info = {
"`datasets` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"PyArrow version": pyarrow.__version__,
"Pandas version": pandas.__version__,
}
print("\nCopy-and-paste the text below in your GitHub issue.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
|
import platform
from argparse import ArgumentParser
import pandas
import pyarrow
from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env", help="Print relevant system environment info.")
download_parser.set_defaults(func=info_command_factory)
def run(self):
info = {
"`datasets` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyArrow version": pyarrow.__version__,
"Pandas version": pandas.__version__,
}
print("\nCopy-and-paste the text below in your GitHub issue.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestKDSingleStageDetector(TestCase):
# TODO: waiting ``ld/ld_r18_gflv1_r101_fpn_coco_1x.py`` ready
@parameterized.expand(['lad/lad_r101_paa_r50_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
assert detector.device.type == 'cpu'
@parameterized.expand([('lad/lad_r101_paa_r50_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
assert len(out) == 3
@parameterized.expand([('lad/lad_r101_paa_r50_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Union[float, Dict[str, float]]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: Dict[str, float], name: str):
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: Dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:
            pass
        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
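# A minimal custom-evaluator sketch following the contract documented above
# (dict return value plus `primary_metric`); the attribute names and the 0.5
# similarity threshold are illustrative assumptions, not part of this module.
class PairAccuracyEvaluator(SentenceEvaluator):
    def __init__(self, sentences1, sentences2, labels, name: str = ""):
        super().__init__()
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.labels = labels
        self.name = name
        self.primary_metric = "accuracy"
    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1):
        from sentence_transformers import util
        emb1 = model.encode(self.sentences1, convert_to_tensor=True)
        emb2 = model.encode(self.sentences2, convert_to_tensor=True)
        # Treat a pair as "similar" when its cosine similarity exceeds 0.5.
        preds = (util.pairwise_cos_sim(emb1, emb2) > 0.5).long().tolist()
        accuracy = sum(int(p == label) for p, label in zip(preds, self.labels)) / len(self.labels)
        metrics = self.prefix_name_to_metrics({"accuracy": accuracy}, self.name)
        self.store_metrics_in_model_card_data(model, metrics)
        return metrics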
|
from sentence_transformers import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __call__(self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
:param model:
the model to evaluate
:param output_path:
path where predictions and metrics are written to
        :param epoch:
the epoch where the evaluation takes place.
This is used for the file prefixes.
If this is -1, then we assume evaluation on test data.
        :param steps:
the steps in the current epoch at time of the evaluation.
This is used for the file prefixes.
If this is -1, then we assume evaluation at the end of the epoch.
:return: a score for the evaluation with a higher score indicating a better result
"""
pass
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
# data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
file_client_args=file_client_args,
)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
train_cfg = dict(by_epoch=True, max_epochs=24)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope='session')
def build_docker_image() -> str:
img_name = Path(__file__).parents[1].stem.lower()
subprocess.run(['docker', 'build', '-t', img_name, '.'], check=True)
return img_name
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def data_generator(test_dir: str):
def _generator():
data_file_path = os.path.join(test_dir, 'data', 'test_data.txt')
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray(
[Document(chunks=[Document(text='hello world') for _ in range(10)])]
)
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(chunks=[Document(text='hello world') for _ in range(10)])
]
)
]
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, DocumentArray
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def data_generator(test_dir: str):
def _generator():
data_file_path = os.path.join(test_dir, 'data', 'test_data.txt')
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([
Document(text='hello world') for _ in range(10)
])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray([
Document(
chunks=[Document(text='hello world') for _ in range(10)]
)
])
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray([
Document(
chunks=[Document(
chunks=[Document(text='hello world') for _ in range(10)])])
])
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.2"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class ReActSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
if includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
if not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "react-single-input"
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class ReActSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "react-single-input"
|
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
from sentence_transformers import losses, SentenceTransformer, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
:param model: SentenceTransformerModel
:param scale: Output of similarity function is multiplied by scale value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('bert-base-uncased')
train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=1.0),
InputExample(texts=['My third sentence', 'Unrelated sentence'], label=0.3)]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.AnglELoss(model=model)
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
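For intuition about the loss both versions inherit from CoSENTLoss, here is a minimal PyTorch sketch of the ranking formula described in the docstring. It assumes `scores` holds the model's pairwise similarities (angle-based for AnglELoss, cosine-based for CoSENTLoss) and `labels` the expected similarities; it illustrates the formula only and is not the library's implementation.

import torch


def cosent_style_loss(scores: torch.Tensor, labels: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
    """log(1 + sum exp(scale * (s(k,l) - s(i,j)))) over all pairs with label(i,j) > label(k,l)."""
    # scores[i]: predicted similarity of sentence pair i; labels[i]: expected similarity.
    diff = scale * (scores[None, :] - scores[:, None])  # diff[i, k] = scale * (s_k - s_i)
    keep = labels[:, None] > labels[None, :]            # keep only (i, k) with label_i > label_k
    terms = diff[keep]
    # Appending a zero term turns logsumexp into log(1 + sum(exp(...))).
    zero = torch.zeros(1, dtype=scores.dtype, device=scores.device)
    return torch.logsumexp(torch.cat([zero, terms]), dim=0)


# Toy check with three sentence pairs.
print(cosent_style_loss(torch.tensor([0.9, 0.2, 0.4]), torch.tensor([1.0, 0.3, 0.5])))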
"""
Paged CSV reader.
A parser for tabular data files.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PagedCSVReader(BaseReader):
"""
Paged CSV parser.
    Displays each row in an LLM-friendly format as a separate document.
Args:
encoding (str): Encoding used to open the file.
utf-8 by default.
"""
def __init__(self, *args: Any, encoding: str = "utf-8", **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._encoding = encoding
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
delimiter: str = ",",
quotechar: str = '"',
) -> List[Document]:
"""Parse file."""
import csv
docs = []
with open(file, encoding=self._encoding) as fp:
csv_reader = csv.DictReader(f=fp, delimiter=delimiter, quotechar=quotechar) # type: ignore
for row in csv_reader:
docs.append(
Document(
text="\n".join(
f"{k.strip()}: {v.strip()}" for k, v in row.items()
),
extra_info=extra_info or {},
)
)
return docs
|
"""Paged CSV reader.
A parser for tabular data files.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PagedCSVReader(BaseReader):
"""Paged CSV parser.
    Displays each row in an LLM-friendly format as a separate document.
Args:
encoding (str): Encoding used to open the file.
utf-8 by default.
"""
def __init__(self, *args: Any, encoding: str = "utf-8", **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._encoding = encoding
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
delimiter: str = ",",
quotechar: str = '"',
) -> List[Document]:
"""Parse file."""
import csv
docs = []
with open(file, encoding=self._encoding) as fp:
csv_reader = csv.DictReader(f=fp, delimiter=delimiter, quotechar=quotechar) # type: ignore
for row in csv_reader:
docs.append(
Document(
text="\n".join(
f"{k.strip()}: {v.strip()}" for k, v in row.items()
),
extra_info=extra_info or {},
)
)
return docs
|
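A short usage sketch for the reader above: it writes a two-row CSV to a hypothetical path, then loads it so that each row becomes one Document whose text is a block of `key: value` lines. It assumes the PagedCSVReader class defined above is importable in the current scope.

from pathlib import Path

# Hypothetical sample file for illustration.
csv_path = Path("people.csv")
csv_path.write_text("name,city\nAda,London\nAlan,Manchester\n", encoding="utf-8")

reader = PagedCSVReader(encoding="utf-8")
docs = reader.load_data(csv_path, extra_info={"source": "people.csv"})

print(len(docs))     # 2 -- one Document per CSV row
print(docs[0].text)  # name: Ada
                     # city: London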
_base_ = './mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
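To sanity-check a config like the pair above, it can be loaded with MMEngine's Config utility, which resolves the `_base_` inheritance and the `{{_base_....}}` substitutions. The config path below is hypothetical; the sketch assumes the snippet is saved inside an MMDetection checkout next to its base configs.

from mmengine.config import Config

# Hypothetical path to the config shown above.
cfg = Config.fromfile('configs/mask_rcnn/mask-rcnn_x101-32x8d_fpn_1x_coco.py')

print(cfg.model.backbone.type)             # ResNeXt
print(cfg.model.data_preprocessor['std'])  # [57.375, 57.12, 58.395]
print(cfg.train_dataloader['dataset']['pipeline'][0]['type'])  # LoadImageFromFile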