input | output
---|---
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
the gradient of the cosine function approaches 0 near the peaks and troughs of its waveform.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float-valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = log(1 + sum(exp(s(k,l) - s(i,j))))``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``. The summation runs over all
pairs of input pairs in the batch that satisfy this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
the gradient of the cosine function approaches 0 near the peaks and troughs of its waveform.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float-valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = log(1 + sum(exp(s(k,l) - s(i,j))))``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``. The summation runs over all
pairs of input pairs in the batch that satisfy this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
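The docstring above notes that AnglELoss differs from CoSENTLoss only in the similarity function. A minimal sketch, assuming a sentence-transformers version that exports both helpers from `util`, comparing the two on toy embeddings:

import torch
from sentence_transformers import util

# toy 4-dimensional embeddings; pairwise_angle_sim treats the first half of the
# dimensions as the real part and the second half as the imaginary part
a = torch.tensor([[1.0, 0.0, 0.5, 0.2], [0.3, 0.8, 0.1, 0.0]])
b = torch.tensor([[0.9, 0.1, 0.4, 0.3], [0.0, 1.0, 0.2, 0.1]])

print(util.pairwise_cos_sim(a, b))    # similarity used by CoSENTLoss
print(util.pairwise_angle_sim(a, b))  # similarity used by AnglELoss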
from jina import Flow
import os
os.environ['JINA_LOG_LEVEL'] = 'DEBUG'
if __name__ == '__main__':
with Flow.load_config('flow.yml') as f:
f.block()
|
from jina import Flow
if __name__ == '__main__':
with Flow.load_config('flow.yml') as f:
f.block()
|
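Both snippets serve a Flow defined in a `flow.yml` whose contents are not shown. A hedged sketch of an equivalent Flow built programmatically; `EchoExecutor` is hypothetical, for illustration only:

from jina import Executor, Flow, requests

class EchoExecutor(Executor):  # hypothetical executor, not from the snippets above
    @requests
    def process(self, docs, **kwargs):
        return docs

f = Flow().add(uses=EchoExecutor)
with f:
    f.block()  # serve until interrupted, mirroring the snippets above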
from . import InputExample
import os
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers
"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
idx = 0  # running example counter; avoids shadowing the built-in ``id``
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, idx)
idx += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= idx:
break
return examples
|
from . import InputExample
import os
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
idx = 0  # running example counter; avoids shadowing the built-in ``id``
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, idx)
idx += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= idx:
break
return examples
|
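A hedged usage sketch for the reader above, assuming the class is in scope and the input is a tab-separated file of `label<TAB>sentence` lines (the file name and contents are illustrative):

import os
import tempfile

folder = tempfile.mkdtemp()
with open(os.path.join(folder, "train.tsv"), "w", encoding="utf-8") as f:
    f.write("sports\tThe team won the match.\n")
    f.write("politics\tThe bill passed the senate.\n")

reader = LabelSentenceReader(folder)
examples = reader.get_examples("train.tsv")
print(len(examples), reader.label_map)  # 2 {'sports': 0, 'politics': 1}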
# Copyright (c) OpenMMLab. All rights reserved.
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner', 'find_latest_checkpoint'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, get_deprecated_model_names,
get_external_models, get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner'
]
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel, ValidationError
from typing_extensions import TypedDict
from docarray import BaseDocument, DocumentArray
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import (
create_doc,
create_doc_from_typeddict,
create_doc_from_dict,
)
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDocument):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDocument):
text: str
images: DocumentArray[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocumentArray[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocumentArray)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDocument)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDocument)
assert issubclass(MyAudio, AudioDoc)
def test_create_doc_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_doc_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_doc_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDocument)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_doc_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDocument)
assert issubclass(Doc, AudioDoc)
def test_create_doc_from_dict():
data_dict = {
'image': ImageDoc(tensor=np.random.rand(3, 224, 224)),
'text': TextDoc(text='hello'),
'id': 123,
}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDocument)
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=TextDoc(text='hey'),
id=111,
)
assert isinstance(doc, BaseDocument)
assert isinstance(doc.text, TextDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.id, int)
# Create a doc with an incorrect type
with pytest.raises(ValidationError):
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=['some', 'text'], # should be TextDoc
id=111,
)
# Handle empty data_dict
with pytest.raises(ValueError):
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict={})
# Data with a None value
data_dict = {'text': 'some text', 'other': None}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDocument)
doc1 = MyDoc(text='txt', other=10)
doc2 = MyDoc(text='txt', other='also text')
assert isinstance(doc1, BaseDocument) and isinstance(doc2, BaseDocument)
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel
from typing_extensions import TypedDict
from docarray import BaseDocument, DocumentArray
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import create_doc, create_from_typeddict
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDocument):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDocument):
text: str
images: DocumentArray[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocumentArray[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocumentArray)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDocument)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDocument)
assert issubclass(MyAudio, AudioDoc)
def test_create_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDocument)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDocument)
assert issubclass(Doc, AudioDoc)
|
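The tests above exercise `create_doc`; a short sketch of the same API in application code, under the same assumptions the tests make (the `PostDoc` name is illustrative):

import numpy as np
from docarray.documents import ImageDoc, TextDoc
from docarray.documents.helper import create_doc

# dynamically build a multimodal schema, as test_create_doc does
PostDoc = create_doc('PostDoc', image=(ImageDoc, ...), text=(TextDoc, ...))

post = PostDoc(
    image=ImageDoc(tensor=np.zeros((3, 64, 64))),
    text=TextDoc(text='a caption'),
)
print(type(post).__name__, post.text.text)  # PostDoc a caption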
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from uuid import UUID
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.pydantic import is_pydantic_v2
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
if is_pydantic_v2:
from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler
from pydantic.json_schema import JsonSchemaValue
from pydantic_core import core_schema
T = TypeVar('T', bound='ID')
@_register_proto(proto_type_name='id')
class ID(str, AbstractType):
"""
Represents a unique ID
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[str, int, UUID],
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert an ID into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=self, type=self._proto_type_name)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
Read an ID from a protobuf message.
:param pb_msg: the protobuf payload carrying the ID as a string
:return: the parsed ID
"""
return parse_obj_as(cls, pb_msg)
if is_pydantic_v2:
@classmethod
def __get_pydantic_core_schema__(
cls, source: Type[Any], handler: 'GetCoreSchemaHandler'
) -> core_schema.CoreSchema:
return core_schema.general_plain_validator_function(
cls.validate,
)
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
field_schema: dict[str, Any] = {}
field_schema.update(type='string')
return field_schema
|
from typing import TYPE_CHECKING, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
@_register_proto(proto_type_name='id')
class ID(str, AbstractType):
"""
Represents a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert an ID into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=self, type=self._proto_type_name)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
Read an ID from a protobuf message.
:param pb_msg: the protobuf payload carrying the ID as a string
:return: the parsed ID
"""
return parse_obj_as(cls, pb_msg)
|
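The validators above accept `str`, `int`, or `UUID` and normalize to a string. A small sketch, assuming `ID` is exported from `docarray.typing` and the pydantic-v1 code path:

from uuid import uuid4
from pydantic import parse_obj_as
from docarray.typing import ID

print(parse_obj_as(ID, 42))       # '42'
print(parse_obj_as(ID, uuid4()))  # e.g. 'd3b0...'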
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.fixture(scope='session')
def basic_encoder() -> SpacyTextEncoder:
return SpacyTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'SpacyTextEncoder'
def test_encoding_cpu():
enc = SpacyTextEncoder(require_gpu=False)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = SpacyTextEncoder(require_gpu=True)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name, emb_dim',
[
('en_core_web_sm', 96),
('en_core_web_lg', 300),
('es_core_news_sm', 96),
],
)
def test_models(model_name: str, emb_dim: int):
encoder = SpacyTextEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (emb_dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: SpacyTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: SpacyTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: SpacyTextEncoder):
docs = DocumentArray(
[
Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import pytest
import spacy
from jina import Document, DocumentArray, Executor
from ...spacy_text_encoder import SpacyTextEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.lang == 'en_core_web_sm'
def test_spacy_text_encoder():
# Input
docs = DocumentArray(
[
Document(text='Han likes eating pizza'),
Document(text='Han likes pizza'),
Document(text='Jina rocks'),
]
)
# Encoder embedding
encoder = SpacyTextEncoder()
encoder.encode(docs, parameters={})
# Compare with output
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96,)
def test_spacy_text_encoder_traversal_paths():
# Input
docs = DocumentArray(
[
Document(
chunks=[
Document(text='Han likes eating pizza'),
Document(text='Han likes pizza'),
]
),
Document(chunks=[Document(text='Jina rocks')]),
]
)
# Encoder embedding
encoder = SpacyTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': ['c']})
# Compare with output
assert len(docs) == 2
assert len(docs[0].chunks) == 2
for chunk in docs[0].chunks:
assert chunk.embedding.shape == (96,)
assert len(docs[1].chunks) == 1
for chunk in docs[1].chunks:
assert chunk.embedding.shape == (96,)
def test_unsupported_lang(tmpdir):
dummy1 = spacy.blank('xx')
dummy1_dir_path = os.path.join(tmpdir, 'xx1')
dummy1.to_disk(dummy1_dir_path)
dummy2 = spacy.blank('xx')
dummy2_dir_path = os.path.join(tmpdir, 'xx2')
dummy2.to_disk(dummy2_dir_path)
# An unknown language should raise an IOError
with pytest.raises(IOError):
SpacyTextEncoder('abcd')
# A language model without a DependencyParser should throw an error
# when trying to use the default encoder
with pytest.raises(ValueError):
SpacyTextEncoder(dummy1_dir_path, use_default_encoder=True)
# And should be fine when 'parser' pipeline is added
dummy1.add_pipe('parser')
dummy1.to_disk(dummy1_dir_path)
SpacyTextEncoder(dummy1_dir_path, use_default_encoder=True)
# A language model without a SentenceRecognizer should throw an error
# when trying to use the non-default encoder
with pytest.raises(ValueError):
SpacyTextEncoder(dummy2_dir_path, use_default_encoder=False)
# And should be fine when the 'tok2vec' pipeline is added
dummy2.add_pipe('tok2vec')
dummy2.to_disk(dummy2_dir_path)
SpacyTextEncoder(dummy2_dir_path, use_default_encoder=False)
|
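The parametrized traversal test above relies on 'r' selecting roots, 'c' first-level chunks, and 'cc' second-level chunks. A minimal sketch of those semantics without the encoder, assuming the same jina version as the tests:

from jina import Document, DocumentArray

docs = DocumentArray([Document(text='root')])
docs[0].chunks = [Document(text='c1'), Document(text='c2')]
docs[0].chunks[0].chunks = [Document(text='cc1')]

for path in ['r', 'c', 'cc']:
    print(path, len(list(docs.traverse_flat([path]))))  # r 1, c 2, cc 1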
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
The margin is an important hyperparameter and needs to be tuned for the task at hand.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. Needs to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseTripletLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
The margin is an important hyperparameter and needs to be tuned for the task at hand.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. Needs to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseTripletLoss(model), lambda_corpus=3e-5, lambda_query=5e-5)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
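A worked numeric sketch of the docstring's formula, loss = max(||a - p|| - ||a - n|| + margin, 0), on toy vectors:

import torch
import torch.nn.functional as F

anchor = torch.tensor([[1.0, 0.0]])
positive = torch.tensor([[0.9, 0.1]])
negative = torch.tensor([[-1.0, 0.0]])
margin = 5.0

d_pos = F.pairwise_distance(anchor, positive)  # ~0.1414
d_neg = F.pairwise_distance(anchor, negative)  # 2.0
loss = F.relu(d_pos - d_neg + margin)
print(loss)  # tensor([3.1414]): positive is closer, but not yet by the full margin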
import pprint
import torch
from torch.utils._pytree import tree_map, tree_map_only
class OpenRegTensorMeta:
def __init__(self, tensor, checked=True):
if checked and not tensor.device.type == "openreg":
raise RuntimeError(
"Creating OpenRegTensorMeta is only for Tensors on openreg device"
)
self.data_ptr = tensor.untyped_storage().data_ptr()
self.size = tensor.size()
self.stride = tensor.stride()
self.storage_offset = tensor.storage_offset()
self.dtype = tensor.dtype
self.nelem_in_bytes = tensor.nelement() * tensor.element_size()
def __repr__(self):
return (
f"OpenRegTensorMeta({self.data_ptr=}, {self.size=}, {self.stride=}, "
f"{self.storage_offset=}, {self.dtype=}, {self.nelem_in_bytes=})"
)
class OpenRegTensorData(torch.Tensor):
@staticmethod
def from_meta(allocator, tensor_meta):
return OpenRegTensorData(allocator.tensor_from_meta(tensor_meta))
VALID_QUEUE_TYPES_IN = {torch.Tensor, int, float}
VALID_QUEUE_TYPES_OUT = {OpenRegTensorMeta, int, float, str}
def safe_str(args):
def convert(obj):
if isinstance(obj, torch.Tensor):
return str(OpenRegTensorMeta(obj, checked=False))
else:
return obj
new_args = tree_map(convert, args)
return pprint.pformat(new_args)
def validate_send_queue_args(cmd, args):
def check(obj):
if type(obj) not in VALID_QUEUE_TYPES_OUT:
if (
cmd == "recv_data"
and type(obj) in [torch.Tensor, OpenRegTensorData]
and obj.device.type == "cpu"
):
# Only HtoD copy command can send cpu Tensors over
return
raise RuntimeError(
f"Trying to send invalid object through queue: {type(obj)}"
)
tree_map(check, args)
def prepare_for_sending(args, kwargs):
def convert(obj):
if type(obj) not in VALID_QUEUE_TYPES_IN:
raise RuntimeError(
f"Cannot send object of type {type(obj)} over openreg device pipe."
)
if isinstance(obj, torch.Tensor):
return OpenRegTensorMeta(obj)
else:
return obj
return tree_map(convert, (args, kwargs))
def receive_after_sending(allocator, args, kwargs):
def convert(obj):
if type(obj) not in VALID_QUEUE_TYPES_OUT:
raise RuntimeError(
f"Received invalid object of type {type(obj)} over openreg device pipe."
)
if isinstance(obj, OpenRegTensorMeta):
return allocator.tensor_from_meta(obj)
else:
return obj
return tree_map(convert, (args, kwargs))
def to_device_no_copy(device, args, kwargs):
def safe_to(t):
if device == "meta":
return t.to(device=device)
else:
return torch.empty_like(t, device=device)
return tree_map_only(torch.Tensor, safe_to, (args, kwargs))
|
import pprint
import torch
from torch.utils._pytree import tree_map, tree_map_only
class OpenRegTensorMeta:
def __init__(self, tensor, checked=True):
if checked and not tensor.device.type == "openreg":
raise RuntimeError(
"Creating OpenRegTensorMeta is only for Tensors on openreg device"
)
self.data_ptr = tensor.untyped_storage().data_ptr()
self.size = tensor.size()
self.stride = tensor.stride()
self.storage_offset = tensor.storage_offset()
self.dtype = tensor.dtype
self.nelem_in_bytes = tensor.nelement() * tensor.element_size()
def __repr__(self):
return (
f"OpenRegTensorMeta({self.data_ptr=}, {self.size=}, {self.stride=}, "
f"{self.storage_offset=}, {self.dtype=}, {self.nelem_in_bytes=})"
)
class OpenRegTensorData(torch.Tensor):
@staticmethod
def from_meta(allocator, tensor_meta):
return OpenRegTensorData(allocator.tensor_from_meta(tensor_meta))
VALID_QUEUE_TYPES_IN = {torch.Tensor, int, float}
VALID_QUEUE_TYPES_OUT = {OpenRegTensorMeta, int, float, str}
def safe_str(args):
def convert(obj):
if isinstance(obj, torch.Tensor):
return str(OpenRegTensorMeta(obj, checked=False))
else:
return obj
new_args = tree_map(convert, args)
return pprint.pformat(new_args)
def validate_send_queue_args(cmd, args):
def check(obj):
if type(obj) not in VALID_QUEUE_TYPES_OUT:
if (
cmd == "recv_data"
and type(obj) in [torch.Tensor, OpenRegTensorData]
and obj.device.type == "cpu"
):
# Only HtoD copy command can send cpu Tensors over
return
raise RuntimeError(
f"Trying to send invalid object through queue: {type(obj)}"
)
tree_map(check, args)
def prepare_for_sending(args, kwargs):
def convert(obj):
if type(obj) not in VALID_QUEUE_TYPES_IN:
raise RuntimeError(
f"Cannot send object of type {type(obj)} " "over openreg device pipe."
)
if isinstance(obj, torch.Tensor):
return OpenRegTensorMeta(obj)
else:
return obj
return tree_map(convert, (args, kwargs))
def receive_after_sending(allocator, args, kwargs):
def convert(obj):
if type(obj) not in VALID_QUEUE_TYPES_OUT:
raise RuntimeError(
f"Received invalid object of type {type(obj)} "
"over openreg device pipe."
)
if isinstance(obj, OpenRegTensorMeta):
return allocator.tensor_from_meta(obj)
else:
return obj
return tree_map(convert, (args, kwargs))
def to_device_no_copy(device, args, kwargs):
def safe_to(t):
if device == "meta":
return t.to(device=device)
else:
return torch.empty_like(t, device=device)
return tree_map_only(torch.Tensor, safe_to, (args, kwargs))
|
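A short sketch of the helpers above on a CPU tensor, assuming they are in scope; `safe_str` works off-device because it passes `checked=False`, and `to_device_no_copy` with "meta" allocates shape-only tensors:

import torch

t = torch.randn(2, 3)
print(safe_str((t, 1, "cmd")))  # tensor rendered via its OpenRegTensorMeta repr

meta_args, meta_kwargs = to_device_no_copy("meta", (t,), {})
print(meta_args[0].device)  # meta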
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
max_active_dims=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
truncate_dim=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
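A hedged sketch of the top-k sparsification idea behind CSRSparsity's `k` parameter; this mirrors the `k=256` comment above, not the module's exact internals:

import torch

emb = torch.randn(2, 8)  # toy dense embeddings
k = 3
_, indices = torch.topk(emb.abs(), k, dim=-1)
sparse = torch.zeros_like(emb).scatter_(-1, indices, emb.gather(-1, indices))
print((sparse != 0).sum(dim=-1))  # tensor([3, 3]): k active dims per row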
__version__ = '0.30.0a3'
from docarray.array import DocumentArray, DocumentArrayStacked
from docarray.base_document.document import BaseDocument
import logging
__all__ = ['BaseDocument', 'DocumentArray', 'DocumentArrayStacked']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
__version__ = '0.30.0a3'
from docarray.array import DocumentArray, DocumentArrayStacked
from docarray.base_document.document import BaseDocument
__all__ = ['BaseDocument', 'DocumentArray', 'DocumentArrayStacked']
|
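With the handler configured in the first version above, consumers can adjust the library's verbosity without touching the root logger:

import logging

logging.getLogger('docarray').setLevel(logging.DEBUG)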
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
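The `ctc_decoder` fixture used above is defined elsewhere; a hedged sketch of one plausible shape for it, a minimal greedy CTC decoder over the bundle's labels:

import torch

class GreedyCTCDecoder(torch.nn.Module):
    def __init__(self, labels, blank: int = 0):
        super().__init__()
        self.labels = labels
        self.blank = blank

    def forward(self, emission: torch.Tensor) -> str:
        indices = torch.argmax(emission, dim=-1)           # best class per frame
        indices = torch.unique_consecutive(indices)        # collapse repeats
        indices = [i for i in indices if i != self.blank]  # drop CTC blanks
        return "".join(self.labels[i] for i in indices)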
from typing import Union
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.video.video_tensorflow_tensor import (
VideoTensorFlowTensor as VideoTFTensor,
)
if tf_available and torch_available:
VideoTensor = Union[VideoNdArray, VideoTorchTensor, VideoTFTensor] # type: ignore
elif tf_available:
VideoTensor = Union[VideoNdArray, VideoTFTensor] # type: ignore
elif torch_available:
VideoTensor = Union[VideoNdArray, VideoTorchTensor] # type: ignore
else:
VideoTensor = Union[VideoNdArray] # type: ignore
|
from typing import Union
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.video.video_tensorflow_tensor import (
VideoTensorFlowTensor as VideoTFTensor,
)
if tf_available and torch_available:
VideoTensor = Union[VideoNdArray, VideoTorchTensor, VideoTFTensor] # type: ignore
elif tf_available:
VideoTensor = Union[VideoNdArray, VideoTFTensor] # type: ignore
elif torch_available:
VideoTensor = Union[VideoNdArray, VideoTorchTensor] # type: ignore
else:
VideoTensor = Union[VideoNdArray] # type: ignore
|
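A small sketch of validating against the union above; it assumes pydantic v1 semantics (members tried in declaration order) and that a (frames, height, width, channel) ndarray is accepted:

import numpy as np
from pydantic import parse_obj_as

video = parse_obj_as(VideoTensor, np.zeros((10, 224, 224, 3)))
print(type(video).__name__)  # VideoNdArray, the first matching member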
import logging
from typing import Optional, cast
from autogpt_libs.supabase_integration_credentials_store.types import (
UserIntegrations,
UserMetadata,
UserMetadataRaw,
)
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
from backend.util.encryption import JSONCryptor
logger = logging.getLogger(__name__)
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"
async def get_or_create_user(user_data: dict) -> User:
user_id = user_data.get("sub")
if not user_id:
raise HTTPException(status_code=401, detail="User ID not found in token")
user_email = user_data.get("email")
if not user_email:
raise HTTPException(status_code=401, detail="Email not found in token")
user = await prisma.user.find_unique(where={"id": user_id})
if not user:
user = await prisma.user.create(
data={
"id": user_id,
"email": user_email,
"name": user_data.get("user_metadata", {}).get("name"),
}
)
return User.model_validate(user)
async def get_user_by_id(user_id: str) -> Optional[User]:
user = await prisma.user.find_unique(where={"id": user_id})
return User.model_validate(user) if user else None
async def create_default_user() -> Optional[User]:
user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
if not user:
user = await prisma.user.create(
data={
"id": DEFAULT_USER_ID,
"email": "default@example.com",
"name": "Default User",
}
)
return User.model_validate(user)
async def get_user_metadata(user_id: str) -> UserMetadata:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
metadata = cast(UserMetadataRaw, user.metadata)
return UserMetadata.model_validate(metadata)
async def update_user_metadata(user_id: str, metadata: UserMetadata):
await User.prisma().update(
where={"id": user_id},
data={"metadata": Json(metadata.model_dump())},
)
async def get_user_integrations(user_id: str) -> UserIntegrations:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
encrypted_integrations = user.integrations
if not encrypted_integrations:
return UserIntegrations()
else:
return UserIntegrations.model_validate(
JSONCryptor().decrypt(encrypted_integrations)
)
async def update_user_integrations(user_id: str, data: UserIntegrations):
encrypted_data = JSONCryptor().encrypt(data.model_dump())
await User.prisma().update(
where={"id": user_id},
data={"integrations": encrypted_data},
)
|
import logging
from typing import Optional, cast
from autogpt_libs.supabase_integration_credentials_store.types import (
UserIntegrations,
UserMetadata,
UserMetadataRaw,
)
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
from backend.util.encryption import JSONCryptor
logger = logging.getLogger(__name__)
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"
async def get_or_create_user(user_data: dict) -> User:
user_id = user_data.get("sub")
if not user_id:
raise HTTPException(status_code=401, detail="User ID not found in token")
user_email = user_data.get("email")
if not user_email:
raise HTTPException(status_code=401, detail="Email not found in token")
user = await prisma.user.find_unique(where={"id": user_id})
if not user:
user = await prisma.user.create(
data={
"id": user_id,
"email": user_email,
"name": user_data.get("user_metadata", {}).get("name"),
}
)
return User.model_validate(user)
async def get_user_by_id(user_id: str) -> Optional[User]:
user = await prisma.user.find_unique(where={"id": user_id})
return User.model_validate(user) if user else None
async def create_default_user() -> Optional[User]:
user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
if not user:
user = await prisma.user.create(
data={
"id": DEFAULT_USER_ID,
"email": "default@example.com",
"name": "Default User",
}
)
return User.model_validate(user)
async def get_user_metadata(user_id: str) -> UserMetadata:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
metadata = cast(UserMetadataRaw, user.metadata)
return UserMetadata.model_validate(metadata)
async def update_user_metadata(user_id: str, metadata: UserMetadata):
await User.prisma().update(
where={"id": user_id},
data={"metadata": Json(metadata.model_dump())},
)
async def get_user_integrations(user_id: str) -> UserIntegrations:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
encrypted_integrations = user.integrations
if not encrypted_integrations:
return UserIntegrations()
else:
return UserIntegrations.model_validate(
JSONCryptor().decrypt(encrypted_integrations)
)
async def update_user_integrations(user_id: str, data: UserIntegrations):
encrypted_data = JSONCryptor().encrypt(data.model_dump())
await User.prisma().update(
where={"id": user_id},
data={"integrations": encrypted_data},
)
async def migrate_and_encrypt_user_integrations():
"""Migrate integration credentials and OAuth states from metadata to integrations column."""
users = await User.prisma().find_many(
where={
"metadata": {
"path": ["integration_credentials"],
"not": Json({"a": "yolo"}), # bogus value works to check if key exists
} # type: ignore
}
)
logger.info(f"Migrating integration credentials for {len(users)} users")
for user in users:
raw_metadata = cast(UserMetadataRaw, user.metadata)
metadata = UserMetadata.model_validate(raw_metadata)
# Get existing integrations data
integrations = await get_user_integrations(user_id=user.id)
# Copy credentials and oauth states from metadata if they exist
if metadata.integration_credentials and not integrations.credentials:
integrations.credentials = metadata.integration_credentials
if metadata.integration_oauth_states:
integrations.oauth_states = metadata.integration_oauth_states
# Save to integrations column
await update_user_integrations(user_id=user.id, data=integrations)
# Remove from metadata
raw_metadata = dict(raw_metadata)
raw_metadata.pop("integration_credentials", None)
raw_metadata.pop("integration_oauth_states", None)
# Update metadata without integration data
await User.prisma().update(
where={"id": user.id},
data={"metadata": Json(raw_metadata)},
)
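
# --- Editor's note: a minimal usage sketch of the helpers above (assumes a
# connected Prisma client; not part of the original module) ---
# import asyncio
#
# async def _demo():
#     user = await create_default_user()
#     integrations = await get_user_integrations(user.id)    # decrypted on read
#     await update_user_integrations(user.id, integrations)  # re-encrypted on write
#
# asyncio.run(_demo())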
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
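
# --- Editor's note: illustrative arithmetic for `auto_scale_lr` above, when
# enabled (linear scaling rule; the GPU count is hypothetical) ---
# actual_batch_size = num_gpus * samples_per_gpu   # e.g. 4 GPUs * 2 = 8
# scaled_lr = 0.02 * actual_batch_size / 16        # -> 0.01 on 4 GPUs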
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
import os
from typing import Dict
from hubble.executor.helper import parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import (
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
__version__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
    It uses the Hub API to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:  # network or parsing errors: fall back to 'master'
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
    # None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
    except ValueError:
        return False
    # a parsable but unrecognized scheme is not valid for K8s / docker compose
    return False
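
# --- Editor's note: minimal usage sketch (assumes `jina` and `hubble` are
# installed so this module imports; not part of the original file) ---
if __name__ == '__main__':
    # '/' and '_' are invalid in K8s resource names, so they become '-'
    assert to_compatible_name('My_Executor/v2') == 'my-executor-v2'
    # a plain docker image URI is always a valid `uses`
    assert validate_uses('docker://jinaai/my-executor:latest')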
|
import os
from typing import Dict
from hubble.executor.helper import parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import (
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
__version__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
    It uses the Hub API to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:  # network or parsing errors: fall back to 'master'
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'upload_files',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
    # None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
    except ValueError:
        return False
    # a parsable but unrecognized scheme is not valid for K8s / docker compose
    return False
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
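
# --- Editor's note: what the caffe-style `Normalize` above computes, in
# isolation (channel order stays BGR because `to_rgb=False`) ---
# import numpy as np
# img = np.random.randint(0, 256, (800, 1333, 3)).astype(np.float32)
# out = (img - np.array([103.530, 116.280, 123.675])) / 1.0  # std is all ones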
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import os
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini():
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content"],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content", {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt():
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini().chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream():
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini().stream_chat(messages=[msg]))
assert response
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.gemini import Gemini
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
from docarray.array.queryset.parser import QueryParser
|
from .parser import QueryParser
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'config_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
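
# --- Editor's note: example invocation (the config and checkpoint paths are
# hypothetical) ---
# python mmdet2torchserve.py \
#     configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py \
#     checkpoints/faster_rcnn_r50_fpn_1x_coco.pth \
#     --output-folder model_store --model-name faster_rcnn --force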
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
|
"""PDF Marker reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFMarkerReader(BaseReader):
"""
    PDF Marker Reader. Reads a PDF into markdown format, preserving tables and layout.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def load_data(
self,
file: Path,
        max_pages: Optional[int] = None,
        langs: Optional[List[str]] = None,
        batch_multiplier: int = 2,
        start_page: Optional[int] = None,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""
Load data from PDF
Args:
file (Path): Path for the PDF file.
            max_pages (Optional[int]): Maximum number of pages to process. Omit to convert the entire document.
            langs (Optional[List[str]]): Languages to use for OCR. See the supported languages: https://github.com/VikParuchuri/surya/blob/master/surya/languages.py
            batch_multiplier (int): Factor by which to multiply the default batch sizes if you have extra VRAM. Higher values take more VRAM but process faster. Defaults to 2; the default batch sizes take ~3 GB of VRAM.
            start_page (Optional[int]): Page to start the conversion from.
Returns:
List[Document]: List of documents.
"""
from marker.convert import convert_single_pdf
from marker.models import load_all_models
model_lst = load_all_models()
full_text, images, out_meta = convert_single_pdf(
str(file),
model_lst,
max_pages=max_pages,
langs=langs,
batch_multiplier=batch_multiplier,
start_page=start_page,
)
doc = Document(text=full_text, extra_info=extra_info or {})
return [doc]
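
# --- Editor's note: minimal usage sketch (assumes the `marker` package is
# installed and that 'sample.pdf' exists; both are hypothetical) ---
# reader = PDFMarkerReader()
# docs = reader.load_data(Path('sample.pdf'), langs=['English'])
# print(docs[0].text[:200])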
|
"""PDF Marker reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFMarkerReader(BaseReader):
"""
    PDF Marker Reader. Reads a PDF into markdown format, preserving tables and layout.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def load_data(
self,
file: Path,
        max_pages: Optional[int] = None,
        langs: Optional[List[str]] = None,
        batch_multiplier: int = 2,
        start_page: Optional[int] = None,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""Load data from PDF
Args:
file (Path): Path for the PDF file.
            max_pages (Optional[int]): Maximum number of pages to process. Omit to convert the entire document.
            langs (Optional[List[str]]): Languages to use for OCR. See the supported languages: https://github.com/VikParuchuri/surya/blob/master/surya/languages.py
            batch_multiplier (int): Factor by which to multiply the default batch sizes if you have extra VRAM. Higher values take more VRAM but process faster. Defaults to 2; the default batch sizes take ~3 GB of VRAM.
            start_page (Optional[int]): Page to start the conversion from.
Returns:
List[Document]: List of documents.
"""
from marker.convert import convert_single_pdf
from marker.models import load_all_models
model_lst = load_all_models()
full_text, images, out_meta = convert_single_pdf(
str(file),
model_lst,
max_pages=max_pages,
langs=langs,
batch_multiplier=batch_multiplier,
start_page=start_page,
)
doc = Document(text=full_text, extra_info=extra_info or {})
return [doc]
|
import wave
from abc import ABC
from typing import BinaryIO, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15  # scale factor mapping float audio in [-1, 1) to 16-bit PCM
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save_to_wav_file(
self: 'T',
file_path: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> None:
"""
Save audio tensor to a .wav file. Mono/stereo is preserved.
:param file_path: path to a .wav file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
"""
comp_backend = self.get_comp_backend()
n_channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
with wave.open(file_path, 'w') as f:
f.setnchannels(n_channels)
f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(self.to_bytes())
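
# --- Editor's note: illustrative sketch (assumes a concrete subclass such as
# docarray's numpy-backed audio tensor implements this interface) ---
# import numpy as np
# from pydantic import parse_obj_as
# from docarray.typing import AudioNdArray
#
# t = np.linspace(0, 1, 44100, dtype=np.float32)
# tone = parse_obj_as(AudioNdArray, 0.5 * np.sin(2 * np.pi * 440 * t))  # 440 Hz sine
# tone.save_to_wav_file('tone.wav', sample_rate=44100)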
|
import wave
from abc import ABC, abstractmethod
from typing import BinaryIO, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
T = TypeVar('T', bound='AbstractAudioTensor')
class AbstractAudioTensor(AbstractTensor, ABC):
@abstractmethod
def to_audio_bytes(self):
"""
Convert audio tensor to bytes.
"""
...
def save_to_wav_file(
self: 'T',
file_path: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> None:
"""
Save audio tensor to a .wav file. Mono/stereo is preserved.
:param file_path: path to a .wav file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
"""
comp_backend = self.get_comp_backend()
n_channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
with wave.open(file_path, 'w') as f:
f.setnchannels(n_channels)
f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(self.to_audio_bytes())
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix fork errors on macOS; this seems to have no effect here, so
# the variable must still be exported manually before starting Jina.
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
    # temporary fix for Python 3.8 on macOS, where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many plot generators in parallel, where the Ubuntu
    default of `ulimit -n 1024` (or 256 on OS X El Capitan) is too low; the
    setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
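
# --- Editor's note: smoke-test sketch of the public surface re-exported
# above (requires a full jina installation and a free port to run) ---
# from jina import Document, DocumentArray, Executor, Flow, requests
#
# class Echo(Executor):
#     @requests
#     def echo(self, docs: DocumentArray, **kwargs):
#         for doc in docs:
#             doc.text = f'echo: {doc.text}'
#
# with Flow().add(uses=Echo) as f:
#     print(f.post('/', Document(text='hi'))[0].text)  # 'echo: hi'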
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix fork errors on macOS; this seems to have no effect here, so
# the variable must still be exported manually before starting Jina.
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
    # temporary fix for Python 3.8 on macOS, where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many plot generators in parallel, where the Ubuntu
    default of `ulimit -n 1024` (or 256 on OS X El Capitan) is too low; the
    setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, StepLR, StepMomentum,
StepParamScheduler, _ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict',
'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR', 'PolyMomentum',
'PolyParamScheduler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, StepLR, StepMomentum,
StepParamScheduler, _ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict',
'OneCycleParamScheduler', 'OneCycleLR'
]
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and that can be loaded into an image tensor.
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> np.ndarray:
"""
Load the image from the bytes into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: np.ndarray representing the image as RGB values
"""
if TYPE_CHECKING:
from PIL import Image as PILImage
else:
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
raw_img = PILImage.open(BytesIO(self))
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
return self._move_channel_axis(tensor, axis_layout=axis_layout)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
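
# --- Editor's note: quick numpy check of the axis permutation above (assumes
# this module is importable; not part of the original file) ---
if __name__ == '__main__':
    hwc = np.zeros((100, 200, 3))  # (H, W, C)
    chw = ImageBytes._move_channel_axis(hwc, axis_layout=('C', 'H', 'W'))
    assert chw.shape == (3, 100, 200)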
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and that can be loaded into an image tensor.
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> np.ndarray:
"""
Load the image from the bytes into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: np.ndarray representing the image as RGB values
"""
from PIL import Image as PILImage
raw_img = PILImage.open(BytesIO(self))
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
return self._move_channel_axis(tensor, axis_layout=axis_layout)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Union[dict, tuple, list]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_train(self, runner) -> None:
"""Synchronize the number of iterations with the runner after resuming
from checkpoints.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (dict or tuple or list, optional): Data from
dataloader.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict, Sequence]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
outputs (dict or sequence, optional): Outputs from model.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
        # Calculate eta every `window_size` iterations. Since the test and
        # val loops do not update runner.iter, use `every_n_inner_iters` to
        # check the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
                # Calculate the average iteration time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_dataloader
else:
cur_dataloader = runner.test_dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
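
# --- Editor's note: the ETA arithmetic above, in isolation (numbers are
# hypothetical) ---
if __name__ == '__main__':
    time_sec_tot, start_iter, cur_iter, max_iters = 300.0, 0, 99, 1000
    time_sec_avg = time_sec_tot / (cur_iter - start_iter + 1)  # 3.0 s/iter
    eta_sec = time_sec_avg * (max_iters - cur_iter - 1)        # 900 iters left
    assert eta_sec == 2700.0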
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_train(self, runner) -> None:
"""Synchronize the number of iterations with the runner after resuming
from checkpoints.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
        # Calculate eta every `window_size` iterations. Since the test and
        # val loops do not update runner.iter, use `every_n_inner_iters` to
        # check the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
                # Calculate the average iteration time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_dataloader
else:
cur_dataloader = runner.test_dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
|
from __future__ import annotations
from typing import Any, List
from langchain_text_splitters.base import TextSplitter
class SpacyTextSplitter(TextSplitter):
"""Splitting text using Spacy package.
    By default, Spacy's `en_core_web_sm` model is used with its default
    max_length of 1000000 (the maximum number of characters the model
    accepts, which can be increased for large files). For faster, but
    potentially less accurate, splitting you can use `pipeline='sentencizer'`.
"""
def __init__(
self,
separator: str = "\n\n",
pipeline: str = "en_core_web_sm",
max_length: int = 1_000_000,
*,
strip_whitespace: bool = True,
**kwargs: Any,
) -> None:
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
self._tokenizer = _make_spacy_pipeline_for_splitting(
pipeline, max_length=max_length
)
self._separator = separator
self._strip_whitespace = strip_whitespace
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (
s.text if self._strip_whitespace else s.text_with_ws
for s in self._tokenizer(text).sents
)
return self._merge_splits(splits, self._separator)
def _make_spacy_pipeline_for_splitting(
pipeline: str, *, max_length: int = 1_000_000
) -> Any: # avoid importing spacy
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
if pipeline == "sentencizer":
        from spacy.lang.en import English  # `spacy.lang` is not exposed by `import spacy` alone

        sentencizer: Any = English()
sentencizer.add_pipe("sentencizer")
else:
sentencizer = spacy.load(pipeline, exclude=["ner", "tagger"])
sentencizer.max_length = max_length
return sentencizer
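
# --- Editor's note: minimal usage sketch (requires `spacy`; the
# 'sentencizer' pipeline avoids downloading a model) ---
# splitter = SpacyTextSplitter(pipeline='sentencizer', chunk_size=100, chunk_overlap=0)
# print(splitter.split_text('First sentence. Second sentence. Third one.'))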
|
from __future__ import annotations
from typing import Any, List
from langchain_text_splitters.base import TextSplitter
class SpacyTextSplitter(TextSplitter):
"""Splitting text using Spacy package.
    By default, Spacy's `en_core_web_sm` model is used with its default
    max_length of 1000000 (the maximum number of characters the model
    accepts, which can be increased for large files). For faster, but
    potentially less accurate, splitting you can use `pipeline='sentencizer'`.
"""
def __init__(
self,
separator: str = "\n\n",
pipeline: str = "en_core_web_sm",
max_length: int = 1_000_000,
*,
strip_whitespace: bool = True,
**kwargs: Any,
) -> None:
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
self._tokenizer = _make_spacy_pipeline_for_splitting(
pipeline, max_length=max_length
)
self._separator = separator
self._strip_whitespace = strip_whitespace
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (
s.text if self._strip_whitespace else s.text_with_ws
for s in self._tokenizer(text).sents
)
return self._merge_splits(splits, self._separator)
def _make_spacy_pipeline_for_splitting(
pipeline: str, *, max_length: int = 1_000_000
) -> Any: # avoid importing spacy
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
if pipeline == "sentencizer":
from spacy.lang.en import English
sentencizer: Any = English()
sentencizer.add_pipe("sentencizer")
else:
sentencizer = spacy.load(pipeline, exclude=["ner", "tagger"])
sentencizer.max_length = max_length
return sentencizer
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
'skin', 'face')
PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
(0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
(128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
(128, 0, 96), (128, 0, 192), (0, 32, 192)]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
'skin', 'face')
|
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
bbox_head=dict(in_channels=512))
|
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
from typing import Union, Iterable, MutableSequence, Iterator
from docarray.array.storage.memory.backend import needs_id2offset_rebuild
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
@needs_id2offset_rebuild
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
self._data.insert(index, value)
def _append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
        :param value: The doc to be appended.
"""
self._data.append(value)
if not self._needs_id2offset_rebuild:
self._id_to_index[value.id] = len(self) - 1
def __eq__(self, other):
return (
type(self) is type(other)
and type(self._data) is type(other._data)
and self._data == other._data
)
def __len__(self):
return len(self._data)
def __iter__(self) -> Iterator['Document']:
yield from self._data
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return x in self._id2offset
elif isinstance(x, Document):
return x.id in self._id2offset
else:
return False
def __repr__(self):
return f'<DocumentArray (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
v = type(self)(self)
v.extend(other)
return v
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
values = list(values) # consume the iterator only once
last_idx = len(self._id2offset)
self._data.extend(values)
self._id_to_index.update({d.id: i + last_idx for i, d in enumerate(values)})
|
from typing import Union, Iterable, MutableSequence, Iterator
from docarray.array.storage.memory.backend import needs_id2offset_rebuild
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
@needs_id2offset_rebuild
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
self._data.insert(index, value)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
        :param value: The doc to be appended.
"""
self._data.append(value)
if not self._needs_id2offset_rebuild:
self._id_to_index[value.id] = len(self) - 1
def __eq__(self, other):
return (
type(self) is type(other)
and type(self._data) is type(other._data)
and self._data == other._data
)
def __len__(self):
return len(self._data)
def __iter__(self) -> Iterator['Document']:
yield from self._data
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return x in self._id2offset
elif isinstance(x, Document):
return x.id in self._id2offset
else:
return False
def __repr__(self):
return f'<DocumentArray (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
v = type(self)(self)
v.extend(other)
return v
def extend(self, values: Iterable['Document']) -> None:
values = list(values) # consume the iterator only once
last_idx = len(self._id2offset)
self._data.extend(values)
self._id_to_index.update({d.id: i + last_idx for i, d in enumerate(values)})
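# Hedged usage sketch: this mixin surfaces through the in-memory
# DocumentArray, so the sequence semantics above look roughly like this:
if __name__ == "__main__":
    from docarray import DocumentArray
    da = DocumentArray([Document(text='a'), Document(text='b')])
    da.append(Document(text='c'))
    da.insert(1, Document(text='d'))
    print(len(da), da[1].text, da[0] in da)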
|
from typing import List, Union, Any
from docarray.helper import dunder_get
class GetAttributesMixin:
"""Provide helper functions for :class:`Document` to allow advanced set and get attributes"""
def _get_attributes(self, *fields: str) -> Union[Any, List[Any]]:
"""Bulk fetch Document fields and return a list of the values of these fields
:param fields: the variable length values to extract from the document
:return: a list with the attributes of this document ordered as the args
"""
ret = []
for k in fields:
if '__' in k:
value = dunder_get(self, k)
else:
value = getattr(self, k)
ret.append(value)
# unboxing if args is single
if len(fields) == 1:
ret = ret[0]
return ret
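# Hedged usage sketch: `_get_attributes` backs bulk field access on Document,
# including dunder paths into nested structures (field names assumed from the
# docarray v1 API):
if __name__ == "__main__":
    from docarray import Document
    d = Document(text='hello', tags={'lang': 'en'})
    print(d._get_attributes('text', 'id'))  # two fields -> list of values
    print(d._get_attributes('tags__lang'))  # single field -> unboxed value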
|
from typing import List, Union, Any
from ...helper import dunder_get
class GetAttributesMixin:
"""Provide helper functions for :class:`Document` to allow advanced set and get attributes """
def _get_attributes(self, *fields: str) -> Union[Any, List[Any]]:
"""Bulk fetch Document fields and return a list of the values of these fields
:param fields: the variable length values to extract from the document
:return: a list with the attributes of this document ordered as the args
"""
ret = []
for k in fields:
if '__' in k:
value = dunder_get(self, k)
else:
value = getattr(self, k)
ret.append(value)
# unboxing if args is single
if len(fields) == 1:
ret = ret[0]
return ret
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SearchEvents
from langchain_community.tools.office365.events_search import SearchEventsInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearchEventsInput": "langchain_community.tools.office365.events_search",
"O365SearchEvents": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365SearchEvents",
"SearchEventsInput",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SearchEvents
from langchain_community.tools.office365.events_search import SearchEventsInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearchEventsInput": "langchain_community.tools.office365.events_search",
"O365SearchEvents": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SearchEventsInput",
"O365SearchEvents",
]
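# Hedged sketch of what the shim enables: attribute access on this module is
# routed through `create_importer`, so deprecated names still resolve (with a
# deprecation warning) to their langchain_community homes, e.g. (module path
# assumed):
#
#     from langchain.tools.office365.events_search import O365SearchEvents
#     # __getattr__("O365SearchEvents") -> langchain_community.tools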
|
from .sox_effects import apply_effects_file, apply_effects_tensor, effect_names, init_sox_effects, shutdown_sox_effects
__all__ = [
"init_sox_effects",
"shutdown_sox_effects",
"effect_names",
"apply_effects_tensor",
"apply_effects_file",
]
|
from torchaudio._internal import module_utils as _mod_utils
from .sox_effects import apply_effects_file, apply_effects_tensor, effect_names, init_sox_effects, shutdown_sox_effects
if _mod_utils.is_sox_available():
import atexit
init_sox_effects()
atexit.register(shutdown_sox_effects)
__all__ = [
"init_sox_effects",
"shutdown_sox_effects",
"effect_names",
"apply_effects_tensor",
"apply_effects_file",
]
|
import pytest
from datasets.utils.version import Version
@pytest.mark.parametrize(
"other, expected_equality",
[
(Version("1.0.0"), True),
("1.0.0", True),
(Version("2.0.0"), False),
("2.0.0", False),
("1", False),
("a", False),
(1, False),
(None, False),
],
)
def test_version_equality_and_hash(other, expected_equality):
version = Version("1.0.0")
assert (version == other) is expected_equality
assert (version != other) is not expected_equality
assert (hash(version) == hash(other)) is expected_equality
|
import pytest
from datasets.utils.version import Version
@pytest.mark.parametrize(
"other, expected_equality",
[
(Version("1.0.0"), True),
("1.0.0", True),
(Version("2.0.0"), False),
("2.0.0", False),
("1", False),
("a", False),
(1, False),
(None, False),
],
)
def test_version_equalities(other, expected_equality):
version = Version("1.0.0")
assert (version == other) is expected_equality
assert (version != other) is not expected_equality
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import tqdm as hf_tqdm
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
index = self.to_sql_kwargs.pop("index", False)
written = self._write(index=index, **self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, index, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, index, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in hf_tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
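# Hedged usage sketch: these classes back `Dataset.to_sql`/`Dataset.from_sql`;
# driving them directly looks roughly like this (SQLite URI is a placeholder):
if __name__ == "__main__":
    ds = Dataset.from_dict({"a": [1, 2, 3]})
    SqlDatasetWriter(ds, "my_table", "sqlite:///example.db").write()
    print(SqlDatasetReader("SELECT * FROM my_table", "sqlite:///example.db").read())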
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
index = self.to_sql_kwargs.pop("index", False)
written = self._write(index=index, **self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, index, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, index, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
|
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class WordCharacterCountBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="Input text to count words and characters",
placeholder="Enter your text here",
advanced=False,
)
class Output(BlockSchema):
word_count: int = SchemaField(description="Number of words in the input text")
character_count: int = SchemaField(
description="Number of characters in the input text"
)
error: str = SchemaField(
description="Error message if the counting operation failed"
)
def __init__(self):
super().__init__(
id="ab2a782d-22cf-4587-8a70-55b59b3f9f90",
description="Counts the number of words and characters in a given text.",
categories={BlockCategory.TEXT},
input_schema=WordCharacterCountBlock.Input,
output_schema=WordCharacterCountBlock.Output,
test_input={"text": "Hello, how are you?"},
test_output=[("word_count", 4), ("character_count", 19)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
text = input_data.text
word_count = len(text.split())
character_count = len(text)
yield "word_count", word_count
yield "character_count", character_count
except Exception as e:
yield "error", str(e)
|
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class WordCharacterCountBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="Input text to count words and characters",
placeholder="Enter your text here",
advanced=False,
)
class Output(BlockSchema):
word_count: int = SchemaField(description="Number of words in the input text")
character_count: int = SchemaField(
description="Number of characters in the input text"
)
error: str = SchemaField(
description="Error message if the counting operation failed"
)
def __init__(self):
super().__init__(
id="ab2a782d-22cf-4587-8a70-55b59b3f9f90",
description="Counts the number of words and characters in a given text.",
categories={BlockCategory.TEXT},
input_schema=WordCharacterCountBlock.Input,
output_schema=WordCharacterCountBlock.Output,
test_input={"text": "Hello, how are you?"},
test_output=[("word_count", 4), ("character_count", 19)],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
text = input_data.text
word_count = len(text.split())
character_count = len(text)
yield "word_count", word_count
yield "character_count", character_count
except Exception as e:
yield "error", str(e)
|
"""QuantileDMatrix related tests."""
import numpy as np
import pytest
from sklearn.model_selection import train_test_split
import xgboost as xgb
from .data import make_batches, make_categorical
def check_ref_quantile_cut(device: str) -> None:
"""Check obtaining the same cut values given a reference."""
X, y, _ = (
data[0]
for data in make_batches(
n_samples_per_batch=8192,
n_features=16,
n_batches=1,
use_cupy=device.startswith("cuda"),
)
)
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
Xy_valid = xgb.QuantileDMatrix(X_valid, y_valid, ref=Xy_train)
cut_train = Xy_train.get_quantile_cut()
cut_valid = Xy_valid.get_quantile_cut()
np.testing.assert_allclose(cut_train[0], cut_valid[0])
np.testing.assert_allclose(cut_train[1], cut_valid[1])
Xy_valid = xgb.QuantileDMatrix(X_valid, y_valid)
cut_valid = Xy_valid.get_quantile_cut()
assert not np.allclose(cut_train[1], cut_valid[1])
def check_categorical_strings(device: str) -> None:
"""Check string inputs."""
if device == "cpu":
pd = pytest.importorskip("pandas")
else:
pd = pytest.importorskip("cudf")
n_categories = 32
X, y = make_categorical(
1024,
8,
n_categories,
onehot=False,
cat_dtype=np.str_,
cat_ratio=0.5,
shuffle=True,
)
X = pd.DataFrame(X)
Xy = xgb.QuantileDMatrix(X, y, enable_categorical=True)
assert Xy.num_col() == 8
cuts = Xy.get_quantile_cut()
indptr = cuts[0]
values = cuts[1]
for i in range(1, len(indptr)):
f_idx = i - 1
if isinstance(X[X.columns[f_idx]].dtype, pd.CategoricalDtype):
beg, end = indptr[f_idx], indptr[i]
col = values[beg:end]
np.testing.assert_allclose(col, np.arange(0, n_categories))
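# Hedged sketch: these helpers are shared by device-specific test modules,
# which invoke them with a device string, e.g.:
#
#     def test_ref_quantile_cut() -> None:
#         check_ref_quantile_cut("cpu")  # the GPU suite passes "cuda"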
|
"""QuantileDMatrix related tests."""
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
from .data import make_batches
def check_ref_quantile_cut(device: str) -> None:
"""Check obtaining the same cut values given a reference."""
X, y, _ = (
data[0]
for data in make_batches(
n_samples_per_batch=8192,
n_features=16,
n_batches=1,
use_cupy=device.startswith("cuda"),
)
)
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
Xy_valid = xgb.QuantileDMatrix(X_valid, y_valid, ref=Xy_train)
cut_train = Xy_train.get_quantile_cut()
cut_valid = Xy_valid.get_quantile_cut()
np.testing.assert_allclose(cut_train[0], cut_valid[0])
np.testing.assert_allclose(cut_train[1], cut_valid[1])
Xy_valid = xgb.QuantileDMatrix(X_valid, y_valid)
cut_valid = Xy_valid.get_quantile_cut()
assert not np.allclose(cut_train[1], cut_valid[1])
|
import json
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
default=None,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="The error on 4xx status codes")
server_error: object = SchemaField(description="The error on 5xx status codes")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
if isinstance(input_data.body, str):
input_data.body = json.loads(input_data.body)
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=input_data.body if input_data.json_format else None,
data=input_data.body if not input_data.json_format else None,
)
result = response.json() if input_data.json_format else response.text
if response.status_code // 100 == 2:
yield "response", result
elif response.status_code // 100 == 4:
yield "client_error", result
elif response.status_code // 100 == 5:
yield "server_error", result
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
|
import json
from enum import Enum
import requests
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default={},
)
body: object = SchemaField(
description="The body of the request",
default={},
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="The error on 4xx status codes")
server_error: object = SchemaField(description="The error on 5xx status codes")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
if isinstance(input_data.body, str):
input_data.body = json.loads(input_data.body)
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=input_data.body,
)
if response.status_code // 100 == 2:
yield "response", response.json()
elif response.status_code // 100 == 4:
yield "client_error", response.json()
elif response.status_code // 100 == 5:
yield "server_error", response.json()
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
|
import os
from typing import Dict, List, TypeVar, Union
T = TypeVar("T")
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
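# Hedged sketch: how the aliases read in a signature (`_flatten` is a
# hypothetical helper, not part of the module):
def _flatten(data: NestedDataStructureLike[str]) -> List[str]:
    """Accept a single str, a list of str, or a dict of str values."""
    if isinstance(data, str):
        return [data]
    if isinstance(data, dict):
        return list(data.values())
    return list(data)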
|
import pytest
from llama_index.llms.nvidia import NVIDIA
@pytest.mark.integration
def test_available_models(mode: dict) -> None:
models = NVIDIA(**mode).available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
import pytest
from llama_index.llms.nvidia import NVIDIA
@pytest.mark.integration()
def test_available_models(mode: dict) -> None:
models = NVIDIA(**mode).available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import AgentOutputParser
class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
"""Parses a message into agent action/finish.
    It is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-functions-agent"
@staticmethod
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
if len(function_call["arguments"].strip()) == 0:
# OpenAI returns an empty string for functions containing no args
_tool_input = {}
else:
# otherwise it returns a json object
_tool_input = json.loads(function_call["arguments"], strict=False)
except JSONDecodeError as e:
msg = (
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg) from e
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content},
log=str(message.content),
)
def parse_result(
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return self._parse_ai_message(message)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import AgentOutputParser
class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
"""Parses a message into agent action/finish.
    It is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-functions-agent"
@staticmethod
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
if len(function_call["arguments"].strip()) == 0:
# OpenAI returns an empty string for functions containing no args
_tool_input = {}
else:
# otherwise it returns a json object
_tool_input = json.loads(function_call["arguments"], strict=False)
            except JSONDecodeError as e:
msg = (
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
                raise OutputParserException(msg) from e
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content},
log=str(message.content),
)
def parse_result(
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return self._parse_ai_message(message)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
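# Hedged sketch: feeding a function-call message through the parser.
if __name__ == "__main__":
    msg = AIMessage(
        content="",
        additional_kwargs={
            "function_call": {"name": "search", "arguments": '{"query": "weather"}'}
        },
    )
    action = OpenAIFunctionsAgentOutputParser._parse_ai_message(msg)
    print(action.tool, action.tool_input)  # search {'query': 'weather'}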
|
import os
import numpy as np
import pytest
import requests
from jina import Client, Document, Flow
from tests import random_docs
# noinspection PyUnresolvedReferences
from tests.integration.crud import CrudIndexer
PARAMS = {'top_k': 10}
def rest_post(f, endpoint, documents):
data = [d.to_dict() for d in documents]
if endpoint == 'delete':
method = 'delete'
elif endpoint == 'update':
method = 'put'
else:
method = 'post'
response = getattr(requests, method)(
f'http://localhost:{f.port}/{endpoint}',
json={'data': data, 'parameters': PARAMS},
)
if response.status_code != 200:
        raise Exception(f'request failed with status code {response.status_code}')
return response.json()
@pytest.mark.parametrize('rest', [True, False])
def test_crud(tmpdir, rest):
os.environ['RESTFUL'] = 'http' if rest else 'grpc'
os.environ['WORKSPACE'] = str(tmpdir)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port)
original_docs = list(random_docs(10, chunks_per_doc=0))
if rest:
rest_post(f, 'index', original_docs)
else:
c.post(on='/index', inputs=original_docs, return_responses=True)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port)
inputs = list(random_docs(1))
if rest:
results = rest_post(f, 'search', inputs)
matches = results['data'][0]['matches']
for doc in results['data']:
assert Document.from_dict(doc).text == 'hello world'
else:
results = c.post(
on='/search', inputs=inputs, parameters=PARAMS, return_responses=True
)
matches = results[0].docs[0].matches
for doc in results[0].docs:
assert doc.text == 'hello world'
assert len(matches) == 10
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port)
inputs = list(random_docs(5, chunks_per_doc=0))
if rest:
rest_post(f, 'delete', inputs)
else:
c.post(on='/delete', inputs=inputs, return_responses=True)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port)
inputs = list(random_docs(1))
if rest:
results = rest_post(f, 'search', inputs)
matches = results['data'][0]['matches']
else:
results = c.post(
on='/search', inputs=inputs, parameters=PARAMS, return_responses=True
)
matches = results[0].docs[0].matches
assert len(matches) == 5
updated_docs = list(
random_docs(5, chunks_per_doc=5, start_id=5, text='hello again')
)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port)
if rest:
rest_post(f, 'update', updated_docs)
else:
c.post(on='/update', inputs=updated_docs)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port)
inputs = list(random_docs(1))
if rest:
results = rest_post(f, 'search', inputs)
matches = sorted(
results['data'][0]['matches'], key=lambda match: match['id']
)
else:
results = c.post(
on='/search', inputs=inputs, parameters=PARAMS, return_responses=True
)
matches = sorted(results[0].docs[0].matches, key=lambda match: match.id)
assert len(matches) == 5
for match, updated_doc in zip(matches, updated_docs):
if isinstance(match, dict):
match = Document.from_dict(match)
assert updated_doc.id == match.id
assert updated_doc.text == match.text
np.testing.assert_array_equal(updated_doc.embedding, match.embedding)
assert len(match.chunks) == 5
assert len(match.chunks) == len(updated_doc.chunks)
for match_chunk, updated_doc_chunk in zip(match.chunks, updated_doc.chunks):
assert match_chunk.text == updated_doc_chunk.text
np.testing.assert_array_equal(
match_chunk.embedding, updated_doc_chunk.embedding
)
|
import numpy as np
import os
import pytest
import requests
from jina import Flow, Document, Client
from tests import random_docs
# noinspection PyUnresolvedReferences
from tests.integration.crud import CrudIndexer
PARAMS = {'top_k': 10}
def rest_post(f, endpoint, documents):
data = [d.to_dict() for d in documents]
if endpoint == 'delete':
method = 'delete'
elif endpoint == 'update':
method = 'put'
else:
method = 'post'
response = getattr(requests, method)(
f'http://localhost:{f.port}/{endpoint}',
json={'data': data, 'parameters': PARAMS},
)
if response.status_code != 200:
        raise Exception(f'request failed with status code {response.status_code}')
return response.json()
@pytest.mark.parametrize('rest', [True, False])
def test_crud(tmpdir, rest):
os.environ['RESTFUL'] = 'http' if rest else 'grpc'
os.environ['WORKSPACE'] = str(tmpdir)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port, return_responses=True)
original_docs = list(random_docs(10, chunks_per_doc=0))
if rest:
rest_post(f, 'index', original_docs)
else:
c.post(
on='/index',
inputs=original_docs,
)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port, return_responses=True)
inputs = list(random_docs(1))
if rest:
results = rest_post(f, 'search', inputs)
matches = results['data'][0]['matches']
for doc in results['data']:
assert Document.from_dict(doc).text == 'hello world'
else:
results = c.post(on='/search', inputs=inputs, parameters=PARAMS)
matches = results[0].docs[0].matches
for doc in results[0].docs:
assert doc.text == 'hello world'
assert len(matches) == 10
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port, return_responses=True)
inputs = list(random_docs(5, chunks_per_doc=0))
if rest:
rest_post(f, 'delete', inputs)
else:
c.post(on='/delete', inputs=inputs)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port, return_responses=True)
inputs = list(random_docs(1))
if rest:
results = rest_post(f, 'search', inputs)
matches = results['data'][0]['matches']
else:
results = c.post(on='/search', inputs=inputs, parameters=PARAMS)
matches = results[0].docs[0].matches
assert len(matches) == 5
updated_docs = list(
random_docs(5, chunks_per_doc=5, start_id=5, text='hello again')
)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port, return_responses=True)
if rest:
rest_post(f, 'update', updated_docs)
else:
c.post(on='/update', inputs=updated_docs)
with Flow.load_config('flow.yml') as f:
c = Client(port=f.port, return_responses=True)
inputs = list(random_docs(1))
if rest:
results = rest_post(f, 'search', inputs)
matches = sorted(
results['data'][0]['matches'], key=lambda match: match['id']
)
else:
results = c.post(on='/search', inputs=inputs, parameters=PARAMS)
matches = sorted(results[0].docs[0].matches, key=lambda match: match.id)
assert len(matches) == 5
for match, updated_doc in zip(matches, updated_docs):
if isinstance(match, dict):
match = Document.from_dict(match)
assert updated_doc.id == match.id
assert updated_doc.text == match.text
np.testing.assert_array_equal(updated_doc.embedding, match.embedding)
assert len(match.chunks) == 5
assert len(match.chunks) == len(updated_doc.chunks)
for match_chunk, updated_doc_chunk in zip(match.chunks, updated_doc.chunks):
assert match_chunk.text == updated_doc_chunk.text
np.testing.assert_array_equal(
match_chunk.embedding, updated_doc_chunk.embedding
)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
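# Hedged sketch of the pattern above: each async data-layer coroutine is
# wrapped into a synchronous, service-exposed method, so a caller would do
# something like (argument names hypothetical):
#
#     db = DatabaseManager()
#     graph = db.get_graph(graph_id)  # coroutine executed via run_and_wait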
|
from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateBackToolInput(BaseModel):
"""Explicit no-args input for NavigateBackTool."""
class NavigateBackTool(BaseBrowserTool):
"""Navigate back to the previous page in the browser history."""
name: str = "previous_webpage"
description: str = "Navigate back to the previous page in the browser history"
args_schema: Type[BaseModel] = NavigateBackToolInput
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
|
from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateBackToolInput(BaseModel):
"""Explicit no-args input for NavigateBackTool."""
class NavigateBackTool(BaseBrowserTool):  # type: ignore[override]
"""Navigate back to the previous page in the browser history."""
name: str = "previous_webpage"
description: str = "Navigate back to the previous page in the browser history"
args_schema: Type[BaseModel] = NavigateBackToolInput
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
|
import numpy as np
import pytest
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, TorchTensor
class NpDoc(BaseDoc):
embedding: NdArray[3, 4]
embedding_no_shape: NdArray
class TorchDoc(BaseDoc):
embedding: TorchTensor[3, 4]
embedding_no_shape: TorchTensor
def test_np_schema():
schema = NpDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
def test_torch_schema():
schema = TorchDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
@pytest.mark.tensorflow
def test_tensorflow_schema():
from docarray.typing import TensorFlowTensor
class TensorflowDoc(BaseDoc):
embedding: TensorFlowTensor[3, 4]
embedding_no_shape: TensorFlowTensor
schema = TensorflowDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
|
import numpy as np
import pytest
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, TorchTensor
class NpDoc(BaseDoc):
embedding: NdArray[3, 4]
embedding_no_shape: NdArray
class TorchDoc(BaseDoc):
embedding: TorchTensor[3, 4]
embedding_no_shape: TorchTensor
def test_np_schema():
schema = NpDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert schema['properties']['embedding']['example'] == orjson_dumps(
np.zeros([3, 4])
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
def test_torch_schema():
schema = TorchDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert schema['properties']['embedding']['example'] == orjson_dumps(
np.zeros([3, 4])
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
@pytest.mark.tensorflow
def test_tensorflow_schema():
from docarray.typing import TensorFlowTensor
class TensorflowDoc(BaseDoc):
embedding: TensorFlowTensor[3, 4]
embedding_no_shape: TensorFlowTensor
schema = TensorflowDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert schema['properties']['embedding']['example'] == orjson_dumps(
np.zeros([3, 4])
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
|
from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.compass import CompassWebhookType
class Transcription(BaseModel):
text: str
speaker: str
end: float
start: float
duration: float
class TranscriptionDataModel(BaseModel):
date: str
transcription: str
transcriptions: list[Transcription]
class CompassAITriggerBlock(Block):
class Input(BlockSchema):
payload: TranscriptionDataModel = SchemaField(hidden=True)
class Output(BlockSchema):
transcription: str = SchemaField(
description="The contents of the compass transcription."
)
def __init__(self):
super().__init__(
id="9464a020-ed1d-49e1-990f-7f2ac924a2b7",
description="This block will output the contents of the compass transcription.",
categories={BlockCategory.HARDWARE},
input_schema=CompassAITriggerBlock.Input,
output_schema=CompassAITriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.COMPASS,
webhook_type=CompassWebhookType.TRANSCRIPTION,
),
test_input=[
{"input": "Hello, World!"},
{"input": "Hello, World!", "data": "Existing Data"},
],
# test_output=[
# ("output", "Hello, World!"), # No data provided, so trigger is returned
# ("output", "Existing Data"), # Data is provided, so data is returned.
# ],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "transcription", input_data.payload.transcription
|
from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.webhooks.compass import CompassWebhookType
class Transcription(BaseModel):
text: str
speaker: str
end: float
start: float
duration: float
class TranscriptionDataModel(BaseModel):
date: str
transcription: str
transcriptions: list[Transcription]
class CompassAITriggerBlock(Block):
class Input(BlockSchema):
payload: TranscriptionDataModel = SchemaField(hidden=True)
class Output(BlockSchema):
transcription: str = SchemaField(
description="The contents of the compass transcription."
)
def __init__(self):
super().__init__(
id="9464a020-ed1d-49e1-990f-7f2ac924a2b7",
description="This block will output the contents of the compass transcription.",
categories={BlockCategory.HARDWARE},
input_schema=CompassAITriggerBlock.Input,
output_schema=CompassAITriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider="compass",
webhook_type=CompassWebhookType.TRANSCRIPTION,
),
test_input=[
{"input": "Hello, World!"},
{"input": "Hello, World!", "data": "Existing Data"},
],
# test_output=[
# ("output", "Hello, World!"), # No data provided, so trigger is returned
# ("output", "Existing Data"), # Data is provided, so data is returned.
# ],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "transcription", input_data.payload.transcription
|
"""
Opendal file and directory reader.
A loader that fetches a file or iterates through a directory on AWS S3 or other compatible service.
"""
import asyncio
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Union, cast
from llama_index.core.readers import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class OpendalReader(BaseReader):
"""General reader for any opendal operator."""
def __init__(
self,
scheme: str,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs,
) -> None:
"""
Initialize opendal operator, along with credentials if needed.
Args:
scheme (str): the scheme of the service
            path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If the
                path ends with `/`, this loader will iterate through the
                entire directory; otherwise, it will load the single file.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
import opendal
super().__init__()
self.path = path
self.file_extractor = file_extractor
self.op = opendal.AsyncOperator(scheme, **kwargs)
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
with tempfile.TemporaryDirectory() as temp_dir:
if not self.path.endswith("/"):
asyncio.run(download_file_from_opendal(self.op, temp_dir, self.path))
else:
asyncio.run(download_dir_from_opendal(self.op, temp_dir, self.path))
loader = SimpleDirectoryReader(temp_dir, file_extractor=self.file_extractor)
return loader.load_data()
async def download_file_from_opendal(op: Any, temp_dir: str, path: str) -> str:
"""Download file from OpenDAL."""
import opendal
op = cast(opendal.AsyncOperator, op)
suffix = Path(path).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
async with op.open_reader(path) as r:
with open(filepath, "wb") as w:
w.write(await r.read())
return filepath
async def download_dir_from_opendal(op: Any, temp_dir: str, dir: str) -> str:
"""Download directory from opendal."""
import opendal
op = cast(opendal.AsyncOperator, op)
async for obj in await op.scan(dir):
await download_file_from_opendal(op, temp_dir, obj.path)
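# Hedged usage sketch (scheme-specific kwargs such as `bucket` and `region`
# are passed straight through to the opendal operator; values are placeholders):
if __name__ == "__main__":
    reader = OpendalReader(
        scheme="s3",
        path="reports/",  # trailing slash -> load the whole directory
        bucket="my-bucket",
        region="us-east-1",
    )
    print(len(reader.load_data()))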
|
"""Opendal file and directory reader.
A loader that fetches a file or iterates through a directory on AWS S3 or other compatible service.
"""
import asyncio
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Union, cast
from llama_index.core.readers import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class OpendalReader(BaseReader):
"""General reader for any opendal operator."""
def __init__(
self,
scheme: str,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs,
) -> None:
"""Initialize opendal operator, along with credentials if needed.
Args:
scheme (str): the scheme of the service
            path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If the
                path ends with `/`, this loader will iterate through the
                entire directory; otherwise, it will load the single file.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
import opendal
super().__init__()
self.path = path
self.file_extractor = file_extractor
self.op = opendal.AsyncOperator(scheme, **kwargs)
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
with tempfile.TemporaryDirectory() as temp_dir:
if not self.path.endswith("/"):
asyncio.run(download_file_from_opendal(self.op, temp_dir, self.path))
else:
asyncio.run(download_dir_from_opendal(self.op, temp_dir, self.path))
loader = SimpleDirectoryReader(temp_dir, file_extractor=self.file_extractor)
return loader.load_data()
async def download_file_from_opendal(op: Any, temp_dir: str, path: str) -> str:
"""Download file from OpenDAL."""
import opendal
op = cast(opendal.AsyncOperator, op)
suffix = Path(path).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
async with op.open_reader(path) as r:
with open(filepath, "wb") as w:
w.write(await r.read())
return filepath
async def download_dir_from_opendal(op: Any, temp_dir: str, dir: str) -> str:
"""Download directory from opendal."""
import opendal
op = cast(opendal.AsyncOperator, op)
async for obj in await op.scan(dir):
await download_file_from_opendal(op, temp_dir, obj.path)
|
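The reader above forwards every extra keyword straight into `opendal.AsyncOperator`. A minimal usage sketch, assuming an S3 bucket; the `bucket`/`region` kwargs follow OpenDAL's S3 service configuration and every concrete value is a placeholder:
# Sketch: load all files under a prefix from S3 via OpenDAL.
reader = OpendalReader(
    scheme="s3",
    path="reports/",  # trailing "/" -> iterate the whole directory
    bucket="my-bucket",  # placeholder
    region="us-east-1",  # placeholder
)
documents = reader.load_data()  # parsed by SimpleDirectoryReader under the hood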
_base_ = './mask-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './mask_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
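Both fragments rely on config inheritance: `_base_` pulls in the full Mask R-CNN R-50 recipe, and the child keys are merged over it, so only the backbone depth, the init checkpoint, and the FPN input channels change. A sketch of inspecting the merged result (placeholder path; the newer snippet pairs with `mmengine.config.Config`, the older one with `mmcv.Config`, same semantics):
# Sketch: _base_ files are merged recursively; child keys win.
from mmengine.config import Config
cfg = Config.fromfile('mask-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py')  # placeholder
print(cfg.model.backbone.depth)    # 18, overriding the ResNet-50 base
print(cfg.model.neck.in_channels)  # [64, 128, 256, 512]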
from __future__ import annotations
import os
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from __future__ import annotations
import os
import platform
import tempfile
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
This is only required for Ubuntu, as we otherwise have disk space issues there.
"""
if os.environ.get("CI", None) and platform.system() == "Linux":
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
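pytest injects these fixtures by parameter name, so a test only has to declare which ones it needs. A minimal consuming test, sketched under the assumption that `encode` returns one embedding row per input sentence:
# Sketch: the fixture above is resolved automatically by its name.
def test_encode_shape(stsb_bert_tiny_model: SentenceTransformer) -> None:
    embeddings = stsb_bert_tiny_model.encode(["first sentence", "second sentence"])
    assert embeddings.shape[0] == 2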
import os
from pathlib import Path
from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
# Used to generate a unique transcript for each dummy audio file
_TRANSCRIPT = [
"Please call Stella",
"Ask her to bring these things",
"with her from the store",
"Six spoons of fresh snow peas, five thick slabs of blue cheese, and maybe a snack for her brother Bob",
"We also need a small plastic snake and a big toy frog for the kids",
"She can scoop these things into three red bags, and we will go meet her Wednesday at the train station",
"When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow",
"The rainbow is a division of white light into many beautiful colors",
"These take the shape of a long round arch, with its path high above, and its two ends \
apparently beyond the horizon",
"There is, according to legend, a boiling pot of gold at one end",
]
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = []
dataset_dir = os.path.join(root_dir, "VCTK-Corpus-0.92")
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 48000
seed = 0
for speaker in range(225, 230):
speaker_id = "p" + str(speaker)
audio_dir = os.path.join(dataset_dir, "wav48_silence_trimmed", speaker_id)
os.makedirs(audio_dir, exist_ok=True)
file_dir = os.path.join(dataset_dir, "txt", speaker_id)
os.makedirs(file_dir, exist_ok=True)
for utterance_id in range(1, 11):
filename = f"{speaker_id}_{utterance_id:03d}_mic2"
audio_file_path = os.path.join(audio_dir, filename + ".wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(audio_file_path, data, sample_rate)
txt_file_path = os.path.join(file_dir, filename[:-5] + ".txt")
transcript = _TRANSCRIPT[utterance_id - 1]
with open(txt_file_path, "w") as f:
f.write(transcript)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, utterance_id)
mocked_samples.append(sample)
seed += 1
return mocked_samples
class TestVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_vctk(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert int(utterance_id) == self.samples[i][4]
num_samples += 1
assert num_samples == len(self.samples)
def test_vctk_str(self):
dataset = vctk.VCTK_092(self.root_dir, audio_ext=".wav")
self._test_vctk(dataset)
def test_vctk_path(self):
dataset = vctk.VCTK_092(Path(self.root_dir), audio_ext=".wav")
self._test_vctk(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
# Used to generate a unique transcript for each dummy audio file
_TRANSCRIPT = [
"Please call Stella",
"Ask her to bring these things",
"with her from the store",
"Six spoons of fresh snow peas, five thick slabs of blue cheese, and maybe a snack for her brother Bob",
"We also need a small plastic snake and a big toy frog for the kids",
"She can scoop these things into three red bags, and we will go meet her Wednesday at the train station",
"When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow",
"The rainbow is a division of white light into many beautiful colors",
"These take the shape of a long round arch, with its path high above, and its two ends \
apparently beyond the horizon",
"There is, according to legend, a boiling pot of gold at one end",
]
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = []
dataset_dir = os.path.join(root_dir, "VCTK-Corpus-0.92")
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 48000
seed = 0
for speaker in range(225, 230):
speaker_id = "p" + str(speaker)
audio_dir = os.path.join(dataset_dir, "wav48_silence_trimmed", speaker_id)
os.makedirs(audio_dir, exist_ok=True)
file_dir = os.path.join(dataset_dir, "txt", speaker_id)
os.makedirs(file_dir, exist_ok=True)
for utterance_id in range(1, 11):
filename = f"{speaker_id}_{utterance_id:03d}_mic2"
audio_file_path = os.path.join(audio_dir, filename + ".wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(audio_file_path, data, sample_rate)
txt_file_path = os.path.join(file_dir, filename[:-5] + ".txt")
transcript = _TRANSCRIPT[utterance_id - 1]
with open(txt_file_path, "w") as f:
f.write(transcript)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, utterance_id)
mocked_samples.append(sample)
seed += 1
return mocked_samples
class TestVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_vctk(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert int(utterance_id) == self.samples[i][4]
num_samples += 1
assert num_samples == len(self.samples)
def test_vctk_str(self):
dataset = vctk.VCTK_092(self.root_dir, audio_ext=".wav")
self._test_vctk(dataset)
def test_vctk_path(self):
dataset = vctk.VCTK_092(Path(self.root_dir), audio_ext=".wav")
self._test_vctk(dataset)
|
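For orientation, the on-disk layout that `get_mock_dataset` reproduces, read directly off the path handling above:
# VCTK-Corpus-0.92/
#   wav48_silence_trimmed/p225/p225_001_mic2.wav  <- 48 kHz white-noise audio
#   txt/p225/p225_001.txt                         <- matching transcript line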
from typing import TYPE_CHECKING, Any
from langchain_core.document_loaders import Blob, BlobLoader
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import (
FileSystemBlobLoader,
YoutubeAudioLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BlobLoader": "langchain_community.document_loaders",
"Blob": "langchain_community.document_loaders",
"FileSystemBlobLoader": "langchain_community.document_loaders",
"YoutubeAudioLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Blob",
"BlobLoader",
"FileSystemBlobLoader",
"YoutubeAudioLoader",
]
|
from typing import TYPE_CHECKING, Any
from langchain_core.document_loaders import Blob, BlobLoader
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import (
FileSystemBlobLoader,
YoutubeAudioLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BlobLoader": "langchain_community.document_loaders",
"Blob": "langchain_community.document_loaders",
"FileSystemBlobLoader": "langchain_community.document_loaders",
"YoutubeAudioLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BlobLoader",
"Blob",
"FileSystemBlobLoader",
"YoutubeAudioLoader",
]
|
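The module-level `__getattr__` keeps the deprecated names lazy: nothing is pulled from `langchain_community` until an attribute is actually requested, at which point `create_importer` emits a deprecation warning and returns the relocated object. A sketch of observing that behaviour (the module path is an assumption for illustration):
# Sketch: the from-import below is resolved through __getattr__ at access time.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from langchain.document_loaders.blob_loaders import YoutubeAudioLoader  # noqa: F401
# `caught` should now hold a deprecation warning pointing at langchain_community.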
"""Handle chained inputs."""
from typing import Optional, TextIO
_TEXT_COLOR_MAPPING = {
"blue": "36;1",
"yellow": "33;1",
"pink": "38;5;200",
"green": "32;1",
"red": "31;1",
}
def get_color_mapping(
items: list[str], excluded_colors: Optional[list] = None
) -> dict[str, str]:
"""Get mapping for items to a support color.
Args:
items: The items to map to colors.
excluded_colors: The colors to exclude.
Returns:
The mapping of items to colors.
"""
colors = list(_TEXT_COLOR_MAPPING.keys())
if excluded_colors is not None:
colors = [c for c in colors if c not in excluded_colors]
return {item: colors[i % len(colors)] for i, item in enumerate(items)}
def get_colored_text(text: str, color: str) -> str:
"""Get colored text.
Args:
text: The text to color.
color: The color to use.
Returns:
The colored text.
"""
color_str = _TEXT_COLOR_MAPPING[color]
return f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m"
def get_bolded_text(text: str) -> str:
"""Get bolded text.
Args:
text: The text to bold.
Returns:
The bolded text.
"""
return f"\033[1m{text}\033[0m"
def print_text(
text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None
) -> None:
"""Print text with highlighting and no end characters.
If a color is provided, the text will be printed in that color.
If a file is provided, the text will be written to that file.
Args:
text: The text to print.
color: The color to use. Defaults to None.
end: The end character to use. Defaults to "".
file: The file to write to. Defaults to None.
"""
text_to_print = get_colored_text(text, color) if color else text
print(text_to_print, end=end, file=file)
if file:
file.flush()  # ensure all printed content is written to the file
|
"""Handle chained inputs."""
from typing import Optional, TextIO
_TEXT_COLOR_MAPPING = {
"blue": "36;1",
"yellow": "33;1",
"pink": "38;5;200",
"green": "32;1",
"red": "31;1",
}
def get_color_mapping(
items: list[str], excluded_colors: Optional[list] = None
) -> dict[str, str]:
"""Get mapping for items to a support color.
Args:
items: The items to map to colors.
excluded_colors: The colors to exclude.
Returns:
The mapping of items to colors.
"""
colors = list(_TEXT_COLOR_MAPPING.keys())
if excluded_colors is not None:
colors = [c for c in colors if c not in excluded_colors]
color_mapping = {item: colors[i % len(colors)] for i, item in enumerate(items)}
return color_mapping
def get_colored_text(text: str, color: str) -> str:
"""Get colored text.
Args:
text: The text to color.
color: The color to use.
Returns:
The colored text.
"""
color_str = _TEXT_COLOR_MAPPING[color]
return f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m"
def get_bolded_text(text: str) -> str:
"""Get bolded text.
Args:
text: The text to bold.
Returns:
The bolded text.
"""
return f"\033[1m{text}\033[0m"
def print_text(
text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None
) -> None:
"""Print text with highlighting and no end characters.
If a color is provided, the text will be printed in that color.
If a file is provided, the text will be written to that file.
Args:
text: The text to print.
color: The color to use. Defaults to None.
end: The end character to use. Defaults to "".
file: The file to write to. Defaults to None.
"""
text_to_print = get_colored_text(text, color) if color else text
print(text_to_print, end=end, file=file)
if file:
file.flush()  # ensure all printed content is written to the file
|
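A short usage sketch of the helpers above: `get_color_mapping` cycles the five ANSI colors over the items, and `print_text` applies the chosen color name.
# Sketch: give each component a stable color, excluding red.
mapping = get_color_mapping(["tool", "llm", "parser"], excluded_colors=["red"])
print_text("calling tool...\n", color=mapping["tool"])
print_text(get_bolded_text("done"), end="\n")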
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.model import is_model_wrapper
from mmengine.runner import ValLoop
from mmdet.registry import LOOPS
@LOOPS.register_module()
class TeacherStudentValLoop(ValLoop):
"""Loop for validation of model teacher and student."""
def run(self):
"""Launch validation for model teacher and student."""
self.runner.call_hook('before_val')
self.runner.call_hook('before_val_epoch')
self.runner.model.eval()
model = self.runner.model
if is_model_wrapper(model):
model = model.module
assert hasattr(model, 'teacher')
assert hasattr(model, 'student')
predict_on = model.semi_test_cfg.get('predict_on', None)
multi_metrics = dict()
for _predict_on in ['teacher', 'student']:
model.semi_test_cfg['predict_on'] = _predict_on
for idx, data_batch in enumerate(self.dataloader):
self.run_iter(idx, data_batch)
# compute metrics
metrics = self.evaluator.evaluate(len(self.dataloader.dataset))
multi_metrics.update(
{'/'.join((_predict_on, k)): v
for k, v in metrics.items()})
model.semi_test_cfg['predict_on'] = predict_on
self.runner.call_hook('after_val_epoch', metrics=multi_metrics)
self.runner.call_hook('after_val')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.model import is_model_wrapper
from mmengine.runner import ValLoop
from mmdet.registry import LOOPS
@LOOPS.register_module()
class TeacherStudentValLoop(ValLoop):
"""Loop for validation of model teacher and student."""
def run(self):
"""Launch validation for model teacher and student."""
self.runner.call_hook('before_val')
self.runner.call_hook('before_val_epoch')
self.runner.model.eval()
model = self.runner.model
if is_model_wrapper(model):
model = model.module
assert hasattr(model, 'teacher')
assert hasattr(model, 'student')
predict_on = model.semi_test_cfg.get('predict_on', None)
multi_metrics = dict()
for _predict_on in ['teacher', 'student']:
model.semi_test_cfg['predict_on'] = _predict_on
for idx, data_batch in enumerate(self.dataloader):
self.run_iter(idx, data_batch)
# compute metrics
metrics = self.evaluator.evaluate(len(self.dataloader.dataset))
multi_metrics.update(
{'/'.join((_predict_on, k)): v
for k, v in metrics.items()})
model.semi_test_cfg['predict_on'] = predict_on
self.runner.call_hook('after_val_epoch', metrics=multi_metrics)
self.runner.call_hook('after_val')
|
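Because the loop is registered in `LOOPS`, it is selected from the runner config rather than constructed by hand; metrics then come back prefixed with `teacher/` and `student/`. A hypothetical config fragment, assuming a semi-supervised detector that exposes `teacher`/`student` submodules and a `semi_test_cfg`:
# Sketch: enable the loop by type in the runner config.
val_cfg = dict(type='TeacherStudentValLoop')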
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
|
from .base import BaseOAuthHandler
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict[str, type[BaseOAuthHandler]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
|
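With the mapping in place, resolving a handler is a plain dictionary lookup keyed by each class's `PROVIDER_NAME`. A sketch; the `"github"` key is an assumption about what `GitHubOAuthHandler.PROVIDER_NAME` holds:
# Sketch: pick an OAuth handler class by provider name.
handler_cls = HANDLERS_BY_NAME["github"]  # assumed PROVIDER_NAME value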
"""
Collection of examples for using sklearn interface
==================================================
For an introduction to XGBoost's scikit-learn estimator interface, see
:doc:`/python/sklearn_estimator`.
Created on 1 Apr 2015
@author: Jamie Hall
"""
import pickle
import numpy as np
from sklearn.datasets import fetch_california_housing, load_digits, load_iris
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
import xgboost as xgb
rng = np.random.RandomState(31337)
print("Zeros and Ones from the Digits dataset: binary classification")
digits = load_digits(n_class=2)
y = digits["target"]
X = digits["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("Iris: multiclass classification")
iris = load_iris()
y = iris["target"]
X = iris["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("California Housing: regression")
X, y = fetch_california_housing(return_X_y=True)
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBRegressor(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(mean_squared_error(actuals, predictions))
print("Parameter optimization")
xgb_model = xgb.XGBRegressor(n_jobs=1)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4], "n_estimators": [50, 100]},
verbose=1,
n_jobs=1,
cv=3,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
# The sklearn API models are picklable
print("Pickling sklearn API models")
# must open in binary format to pickle
pickle.dump(clf, open("best_calif.pkl", "wb"))
clf2 = pickle.load(open("best_calif.pkl", "rb"))
print(np.allclose(clf.predict(X), clf2.predict(X)))
# Early-stopping
X = digits["data"]
y = digits["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = xgb.XGBClassifier(n_jobs=1, early_stopping_rounds=10, eval_metric="auc")
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
|
'''
Collection of examples for using sklearn interface
==================================================
For an introduction to XGBoost's scikit-learn estimator interface, see
:doc:`/python/sklearn_estimator`.
Created on 1 Apr 2015
@author: Jamie Hall
'''
import pickle
import numpy as np
from sklearn.datasets import fetch_california_housing, load_digits, load_iris
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
import xgboost as xgb
rng = np.random.RandomState(31337)
print("Zeros and Ones from the Digits dataset: binary classification")
digits = load_digits(n_class=2)
y = digits['target']
X = digits['data']
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("Iris: multiclass classification")
iris = load_iris()
y = iris['target']
X = iris['data']
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("California Housing: regression")
X, y = fetch_california_housing(return_X_y=True)
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBRegressor(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(mean_squared_error(actuals, predictions))
print("Parameter optimization")
xgb_model = xgb.XGBRegressor(n_jobs=1)
clf = GridSearchCV(xgb_model,
{'max_depth': [2, 4],
'n_estimators': [50, 100]}, verbose=1, n_jobs=1, cv=3)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
# The sklearn API models are picklable
print("Pickling sklearn API models")
# must open in binary format to pickle
pickle.dump(clf, open("best_calif.pkl", "wb"))
clf2 = pickle.load(open("best_calif.pkl", "rb"))
print(np.allclose(clf.predict(X), clf2.predict(X)))
# Early-stopping
X = digits['data']
y = digits['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = xgb.XGBClassifier(n_jobs=1)
clf.fit(X_train, y_train, early_stopping_rounds=10, eval_metric="auc",
eval_set=[(X_test, y_test)])
|
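The two listings differ mainly in where early stopping lives: current XGBoost takes `early_stopping_rounds` and `eval_metric` on the estimator constructor, while older releases accepted them as `fit()` keyword arguments. A sketch of the current form, including reading back the selected round:
# Sketch: early stopping configured on the estimator (current sklearn API).
clf = xgb.XGBClassifier(n_jobs=1, early_stopping_rounds=10, eval_metric="auc")
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
print(clf.best_iteration)  # boosting round selected by early stopping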
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from contextlib import contextmanager
from typing import Generator, Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of resetting the current ``Registry`` by ``default_scope``
in the internal module which cannot access runner directly, it is difficult
to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
created ``DefaultScope`` instance by given ``default_scope``, the internal
module can get ``default_scope`` by ``DefaultScope.get_current_instance``
everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine.model import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
assert isinstance(
scope_name,
str), (f'scope_name should be a string, but got {scope_name}')
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
Since default_scope is an optional argument for ``Registry.build``.
``get_current_instance`` should return ``None`` if there is no
``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
Optional[DefaultScope]: Return None if no ``DefaultScope``
instance has been created yet; otherwise return the
latest created DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super().get_current_instance()
else:
instance = None
_release_lock()
return instance
@classmethod
@contextmanager
def overwrite_default_scope(cls, scope_name: Optional[str]) -> Generator:
"""overwrite the current default scope with `scope_name`"""
if scope_name is None:
yield
else:
tmp = copy.deepcopy(cls._instance_dict)
cls.get_instance(f'overwrite-{time.time()}', scope_name=scope_name)
try:
yield
finally:
cls._instance_dict = tmp
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from contextlib import contextmanager
from typing import Generator, Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of resetting the current ``Registry`` by ``default_scope``
in the internal module which cannot access runner directly, it is difficult
to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
created ``DefaultScope`` instance by given ``default_scope``, the internal
module can get ``default_scope`` by ``DefaultScope.get_current_instance``
everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
assert isinstance(
scope_name,
str), (f'scope_name should be a string, but got {scope_name}')
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
Since default_scope is an optional argument for ``Registry.build``.
``get_current_instance`` should return ``None`` if there is no
``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
Optional[DefaultScope]: Return None if no ``DefaultScope``
instance has been created yet; otherwise return the
latest created DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super().get_current_instance()
else:
instance = None
_release_lock()
return instance
@classmethod
@contextmanager
def overwrite_default_scope(cls, scope_name: Optional[str]) -> Generator:
"""overwrite the current default scope with `scope_name`"""
if scope_name is None:
yield
else:
tmp = copy.deepcopy(cls._instance_dict)
cls.get_instance(f'overwrite-{time.time()}', scope_name=scope_name)
try:
yield
finally:
cls._instance_dict = tmp
|
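`overwrite_default_scope` snapshots `_instance_dict`, registers a throwaway scope, and restores the snapshot on exit, so the override is strictly temporary. A usage sketch:
# Sketch: temporarily resolve registries under the 'mmdet' scope.
with DefaultScope.overwrite_default_scope('mmdet'):
    scope = DefaultScope.get_current_instance()
    assert scope is not None and scope.scope_name == 'mmdet'
# the previous scope (or the absence of one) is restored here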
import logging
import numpy as np
import os
import csv
from typing import Optional
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
samples (List[Dict[str, Union[str, List[str]]]]): Must be a list where each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(
self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: Optional[int] = None
):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
"MRR@{}".format(self.at_k),
"NDCG@{}".format(self.at_k),
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
"Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(
num_queries,
np.min(num_positives),
np.mean(num_positives),
np.max(num_positives),
np.min(num_negatives),
np.mean(num_negatives),
np.max(num_negatives),
)
)
logger.info("MRR@{}: {:.2f}".format(self.at_k, mean_mrr * 100))
logger.info("NDCG@{}: {:.2f}".format(self.at_k, mean_ndcg * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
import logging
import numpy as np
import os
import csv
from typing import Optional
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
:param samples: Must be a list and each element is of the form: {'query': '', 'positive': [], 'negative': []}. Query is the search query,
positive is a list of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(
self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: Optional[int] = None
):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
"MRR@{}".format(self.at_k),
"NDCG@{}".format(self.at_k),
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
"Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(
num_queries,
np.min(num_positives),
np.mean(num_positives),
np.max(num_positives),
np.min(num_negatives),
np.mean(num_negatives),
np.max(num_negatives),
)
)
logger.info("MRR@{}: {:.2f}".format(self.at_k, mean_mrr * 100))
logger.info("NDCG@{}: {:.2f}".format(self.at_k, mean_ndcg * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
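A minimal usage sketch; the sample and names are illustrative, and any trained CrossEncoder can stand in for `cross_encoder`:
# Sketch: evaluate re-ranking quality on one toy sample.
samples = [{
    "query": "capital of France",
    "positive": ["Paris is the capital of France."],
    "negative": ["Berlin is the capital of Germany.", "The Seine is a river."],
}]
evaluator = CERerankingEvaluator(samples, at_k=10, name="toy-dev")
# mean_mrr = evaluator(cross_encoder, output_path="eval/")  # with a trained model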
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskScoringRCNN(TwoStageDetector):
"""Mask Scoring RCNN.
https://arxiv.org/abs/1903.00241
"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskScoringRCNN(TwoStageDetector):
"""Mask Scoring RCNN.
https://arxiv.org/abs/1903.00241
"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.14"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.13"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
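The two listings differ only in `__version__`; the rest is the stable re-export surface, which is what makes flat imports from the package root work:
# Sketch: the canonical flat-import style enabled by the re-exports above.
from fastapi import FastAPI, HTTPException

app = FastAPI()

@app.get("/items/{item_id}")
def read_item(item_id: int) -> dict:
    if item_id < 0:
        raise HTTPException(status_code=400, detail="item_id must be non-negative")
    return {"item_id": item_id}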
from typing import Any, Dict, List, Tuple, Type, cast, Set
from docarray import BaseDoc, DocList
from docarray.index.abstract import BaseDocIndex
from docarray.utils.filter import filter_docs
from docarray.utils.find import FindResult
def _collect_query_args(method_name: str): # TODO: use partialmethod instead
def inner(self, *args, **kwargs):
if args:
raise ValueError(
f'Positional arguments are not supported for '
f'`{type(self)}.{method_name}`.'
f' Use keyword arguments instead.'
)
updated_query = self._queries + [(method_name, kwargs)]
return type(self)(updated_query)
return inner
def _collect_query_required_args(method_name: str, required_args: Set[str] = None):
"""
Returns a function that ensures required keyword arguments are provided.
:param method_name: The name of the method for which the required arguments are being checked.
:type method_name: str
:param required_args: A set containing the names of required keyword arguments. Defaults to None.
:type required_args: Optional[Set[str]]
:return: A function that checks for required keyword arguments before executing the specified method.
Raises ValueError if positional arguments are provided.
Raises TypeError if any required keyword argument is missing.
:rtype: Callable
"""
if required_args is None:
required_args = set()
def inner(self, *args, **kwargs):
if args:
raise ValueError(
f"Positional arguments are not supported for "
f"`{type(self)}.{method_name}`. "
f"Use keyword arguments instead."
)
missing_args = required_args - set(kwargs.keys())
if missing_args:
raise ValueError(
f"`{type(self)}.{method_name}` is missing required argument(s): {', '.join(missing_args)}"
)
updated_query = self._queries + [(method_name, kwargs)]
return type(self)(updated_query)
return inner
def _execute_find_and_filter_query(
doc_index: BaseDocIndex, query: List[Tuple[str, Dict]], reverse_order: bool = False
) -> FindResult:
"""
Executes all find calls from query first using `doc_index.find()`,
and filtering queries after that using DocArray's `filter_docs()`.
Text search is not supported.
:param doc_index: Document index instance.
Either InMemoryExactNNIndex or HnswDocumentIndex.
:param query: Dictionary containing search and filtering configuration.
:param reverse_order: Flag indicating whether to sort in descending order.
If set to False (default), the sorting will be in ascending order.
This option is necessary because, depending on the index, lower scores
can correspond to better matches, and vice versa.
:return: Sorted documents and their corresponding scores.
"""
docs_found = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))([])
filter_conditions = []
filter_limit = None
doc_to_score: Dict[BaseDoc, Any] = {}
for op, op_kwargs in query:
if op == 'find':
docs, scores = doc_index.find(**op_kwargs)
docs_found.extend(docs)
doc_to_score.update(zip(docs.__getattribute__('id'), scores))
elif op == 'filter':
filter_conditions.append(op_kwargs['filter_query'])
filter_limit = op_kwargs.get('limit')
else:
raise ValueError(f'Query operation is not supported: {op}')
doc_index._logger.debug(f'Executing query {query}')
docs_filtered = docs_found
for cond in filter_conditions:
docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))
docs_filtered = docs_cls(filter_docs(docs_filtered, cond))
if filter_limit:
docs_filtered = docs_filtered[:filter_limit]
doc_index._logger.debug(f'{len(docs_filtered)} results found')
docs_and_scores = zip(
docs_filtered, (doc_to_score[doc.id] for doc in docs_filtered)
)
docs_sorted = sorted(docs_and_scores, key=lambda x: x[1], reverse=reverse_order)
out_docs, out_scores = zip(*docs_sorted)
return FindResult(documents=out_docs, scores=out_scores)
|
from typing import Any, Dict, List, Tuple, Type, cast
from docarray import BaseDoc, DocList
from docarray.index.abstract import BaseDocIndex
from docarray.utils.filter import filter_docs
from docarray.utils.find import FindResult
def _collect_query_args(method_name: str): # TODO: use partialmethod instead
def inner(self, *args, **kwargs):
if args:
raise ValueError(
f'Positional arguments are not supported for '
f'`{type(self)}.{method_name}`.'
f' Use keyword arguments instead.'
)
updated_query = self._queries + [(method_name, kwargs)]
return type(self)(updated_query)
return inner
def _execute_find_and_filter_query(
doc_index: BaseDocIndex, query: List[Tuple[str, Dict]], reverse_order: bool = False
) -> FindResult:
"""
Executes all find calls from query first using `doc_index.find()`,
and filtering queries after that using DocArray's `filter_docs()`.
Text search is not supported.
:param doc_index: Document index instance.
Either InMemoryExactNNIndex or HnswDocumentIndex.
:param query: Dictionary containing search and filtering configuration.
:param reverse_order: Flag indicating whether to sort in descending order.
If set to False (default), the sorting will be in ascending order.
This option is necessary because, depending on the index, lower scores
can correspond to better matches, and vice versa.
:return: Sorted documents and their corresponding scores.
"""
docs_found = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))([])
filter_conditions = []
filter_limit = None
doc_to_score: Dict[BaseDoc, Any] = {}
for op, op_kwargs in query:
if op == 'find':
docs, scores = doc_index.find(**op_kwargs)
docs_found.extend(docs)
doc_to_score.update(zip(docs.__getattribute__('id'), scores))
elif op == 'filter':
filter_conditions.append(op_kwargs['filter_query'])
filter_limit = op_kwargs.get('limit')
else:
raise ValueError(f'Query operation is not supported: {op}')
doc_index._logger.debug(f'Executing query {query}')
docs_filtered = docs_found
for cond in filter_conditions:
docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))
docs_filtered = docs_cls(filter_docs(docs_filtered, cond))
if filter_limit:
docs_filtered = docs_filtered[:filter_limit]
doc_index._logger.debug(f'{len(docs_filtered)} results found')
docs_and_scores = zip(
docs_filtered, (doc_to_score[doc.id] for doc in docs_filtered)
)
docs_sorted = sorted(docs_and_scores, key=lambda x: x[1], reverse=reverse_order)
out_docs, out_scores = zip(*docs_sorted)
return FindResult(documents=out_docs, scores=out_scores)
|
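`_collect_query_required_args` is designed to be bound as a method on a query-builder class whose instances carry `_queries` and can be re-created from an updated query list. A toy sketch; the builder class and the `query` requirement are assumptions for illustration:
# Sketch: binding the generated method on a hypothetical builder.
class ToyQueryBuilder:
    def __init__(self, queries=None):
        self._queries = queries or []

    find = _collect_query_required_args('find', required_args={'query'})

builder = ToyQueryBuilder().find(query=[0.1, 0.2], limit=5)
print(builder._queries)  # [('find', {'query': [0.1, 0.2], 'limit': 5})]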
"""In memory document index."""
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
It provides a simple search API that returns documents by the number of
counts the given query appears in the document.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast("str", item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
found_documents = []
for id_ in ids:
if id_ in self.store:
found_documents.append(self.store[id_])
return found_documents
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
|
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
It provides a simple search API that returns documents by the number of
counts the given query appears in the document.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast("str", item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
found_documents = []
for id_ in ids:
if id_ in self.store:
found_documents.append(self.store[id_])
return found_documents
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
|
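Since `_get_relevant_documents` implements the retriever hook, the index can be queried through the standard runnable interface as well as via `get`. A sketch, assuming `DocumentIndex` inherits `invoke` from `BaseRetriever`:
# Sketch: substring-count "search" over two tiny documents.
index = InMemoryDocumentIndex(top_k=1)
index.upsert([
    Document(page_content="dogs and more dogs"),
    Document(page_content="one dog, many cats"),
])
top = index.invoke("dogs")  # ranked by how often the query appears
print(top[0].page_content)  # "dogs and more dogs"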
from typing import Union, BinaryIO, TYPE_CHECKING
from docarray.document.mixins.helper import _uri_to_blob, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class UriFileMixin:
"""Provide helper functions for :class:`Document` to dump content to a file."""
def save_uri_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.uri` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
blob = _uri_to_blob(self.uri)
fp.write(blob)
return self
|
from typing import Union, BinaryIO, TYPE_CHECKING
from .helper import _uri_to_blob, _get_file_context
if TYPE_CHECKING:
from ...typing import T
class UriFileMixin:
"""Provide helper functions for :class:`Document` to dump content to a file."""
def save_uri_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.uri` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
blob = _uri_to_blob(self.uri)
fp.write(blob)
return self
|
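Usage is a single call on any Document whose `uri` is populated: `_uri_to_blob` fetches the bytes and `_get_file_context` accepts either a filename or an open binary file object. A sketch with a placeholder target name:
# Sketch: dump the resource behind `doc.uri` to disk.
doc.save_uri_to_file('downloaded.bin')  # `doc` is a Document with .uri set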
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from tools.analysis_tools.benchmark import measure_inferense_speed
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet benchmark a model of FPS')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument(
'--round-num',
type=int,
default=1,
help='round a number to a given precision in decimal digits')
parser.add_argument(
'--out', type=str, help='output path of gathered fps to be stored')
parser.add_argument(
'--max-iter', type=int, default=400, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=40, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
if __name__ == '__main__':
args = parse_args()
assert args.round_num >= 0
config = Config.fromfile(args.config)
if args.launcher == 'none':
raise NotImplementedError('Only supports distributed mode')
else:
init_dist(args.launcher)
result_dict = {}
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
cfg_path = model_info['config'].strip()
cfg = Config.fromfile(cfg_path)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
fps = measure_inferense_speed(cfg, checkpoint, args.max_iter,
args.log_interval,
args.fuse_conv_bn)
print(
f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
f'times per image: {1000/fps:.{args.round_num}f} ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=round(fps, args.round_num),
ms_times_pre_image=round(1000 / fps, args.round_num))
except Exception as e:
print(f'{cfg_path} error: {repr(e)}')
result_dict[cfg_path] = 0
if args.out:
mmcv.mkdir_or_exist(args.out)
mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
|
import argparse
import os
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from tools.analysis_tools.benchmark import measure_inferense_speed
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet benchmark a model of FPS')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument(
'--round-num',
type=int,
default=1,
help='round a number to a given precision in decimal digits')
parser.add_argument(
'--out', type=str, help='output path of gathered fps to be stored')
parser.add_argument(
'--max-iter', type=int, default=400, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=40, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
if __name__ == '__main__':
args = parse_args()
assert args.round_num >= 0
config = Config.fromfile(args.config)
if args.launcher == 'none':
raise NotImplementedError('Only supports distributed mode')
else:
init_dist(args.launcher)
result_dict = {}
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
cfg_path = model_info['config'].strip()
cfg = Config.fromfile(cfg_path)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
fps = measure_inferense_speed(cfg, checkpoint, args.max_iter,
args.log_interval,
args.fuse_conv_bn)
print(
f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
f'times per image: {1000/fps:.{args.round_num}f} ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=round(fps, args.round_num),
                    ms_times_per_image=round(1000 / fps, args.round_num))
except Exception as e:
                print(f'{cfg_path} error: {repr(e)}')
result_dict[cfg_path] = 0
if args.out:
mmcv.mkdir_or_exist(args.out)
mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
|
"""Popular unsupervised clustering algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._affinity_propagation import AffinityPropagation, affinity_propagation
from ._agglomerative import (
AgglomerativeClustering,
FeatureAgglomeration,
linkage_tree,
ward_tree,
)
from ._bicluster import SpectralBiclustering, SpectralCoclustering
from ._birch import Birch
from ._bisect_k_means import BisectingKMeans
from ._dbscan import DBSCAN, dbscan
from ._hdbscan.hdbscan import HDBSCAN
from ._kmeans import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus
from ._mean_shift import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift
from ._optics import (
OPTICS,
cluster_optics_dbscan,
cluster_optics_xi,
compute_optics_graph,
)
from ._spectral import SpectralClustering, spectral_clustering
__all__ = [
"DBSCAN",
"HDBSCAN",
"OPTICS",
"AffinityPropagation",
"AgglomerativeClustering",
"Birch",
"BisectingKMeans",
"FeatureAgglomeration",
"KMeans",
"MeanShift",
"MiniBatchKMeans",
"SpectralBiclustering",
"SpectralClustering",
"SpectralCoclustering",
"affinity_propagation",
"cluster_optics_dbscan",
"cluster_optics_xi",
"compute_optics_graph",
"dbscan",
"estimate_bandwidth",
"get_bin_seeds",
"k_means",
"kmeans_plusplus",
"linkage_tree",
"mean_shift",
"spectral_clustering",
"ward_tree",
]
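# Example usage (a minimal sketch):
#   import numpy as np
#   from sklearn.cluster import KMeans
#   X = np.array([[1, 2], [1, 4], [10, 2], [10, 4]])
#   labels = KMeans(n_clusters=2, n_init="auto", random_state=0).fit_predict(X)
#   # `labels` assigns each row of X to one of the two clusters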
|
"""Popular unsupervised clustering algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._affinity_propagation import AffinityPropagation, affinity_propagation
from ._agglomerative import (
AgglomerativeClustering,
FeatureAgglomeration,
linkage_tree,
ward_tree,
)
from ._bicluster import SpectralBiclustering, SpectralCoclustering
from ._birch import Birch
from ._bisect_k_means import BisectingKMeans
from ._dbscan import DBSCAN, dbscan
from ._hdbscan.hdbscan import HDBSCAN
from ._kmeans import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus
from ._mean_shift import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift
from ._optics import (
OPTICS,
cluster_optics_dbscan,
cluster_optics_xi,
compute_optics_graph,
)
from ._spectral import SpectralClustering, spectral_clustering
__all__ = [
"AffinityPropagation",
"AgglomerativeClustering",
"Birch",
"DBSCAN",
"OPTICS",
"cluster_optics_dbscan",
"cluster_optics_xi",
"compute_optics_graph",
"KMeans",
"BisectingKMeans",
"FeatureAgglomeration",
"MeanShift",
"MiniBatchKMeans",
"SpectralClustering",
"affinity_propagation",
"dbscan",
"estimate_bandwidth",
"get_bin_seeds",
"k_means",
"kmeans_plusplus",
"linkage_tree",
"mean_shift",
"spectral_clustering",
"ward_tree",
"SpectralBiclustering",
"SpectralCoclustering",
"HDBSCAN",
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
import numpy as np
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator, dataset_name_to_id
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.util import is_datasets_available
if TYPE_CHECKING:
from torch import Tensor
from sentence_transformers.evaluation import SimilarityFunction
from sentence_transformers.evaluation.NanoBEIREvaluator import (
DatasetNameType,
)
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseNanoBEIREvaluator(NanoBEIREvaluator):
def __init__(
self,
dataset_names: list[DatasetNameType] | None = None,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] = None,
main_score_function: str | SimilarityFunction | None = None,
aggregate_fn: Callable[[list[float]], float] = np.mean,
aggregate_key: str = "mean",
query_prompts: str | dict[str, str] | None = None,
corpus_prompts: str | dict[str, str] | None = None,
):
super().__init__(
dataset_names=dataset_names,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
aggregate_fn=aggregate_fn,
aggregate_key=aggregate_key,
query_prompts=query_prompts,
corpus_prompts=corpus_prompts,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps, *args, **kwargs)
def _load_dataset(
self, dataset_name: DatasetNameType, **ir_evaluator_kwargs
) -> SparseInformationRetrievalEvaluator:
if not is_datasets_available():
raise ValueError(
"datasets is not available. Please install it to use the SparseNanoBEIREvaluator via `pip install datasets`."
)
from datasets import load_dataset
dataset_path = dataset_name_to_id[dataset_name.lower()]
corpus = load_dataset(dataset_path, "corpus", split="train")
queries = load_dataset(dataset_path, "queries", split="train")
qrels = load_dataset(dataset_path, "qrels", split="train")
corpus_dict = {sample["_id"]: sample["text"] for sample in corpus if len(sample["text"]) > 0}
queries_dict = {sample["_id"]: sample["text"] for sample in queries if len(sample["text"]) > 0}
qrels_dict = {}
for sample in qrels:
if sample["query-id"] not in qrels_dict:
qrels_dict[sample["query-id"]] = set()
qrels_dict[sample["query-id"]].add(sample["corpus-id"])
if self.query_prompts is not None:
ir_evaluator_kwargs["query_prompt"] = self.query_prompts.get(dataset_name, None)
if self.corpus_prompts is not None:
ir_evaluator_kwargs["corpus_prompt"] = self.corpus_prompts.get(dataset_name, None)
human_readable_name = self._get_human_readable_name(dataset_name)
return SparseInformationRetrievalEvaluator(
queries=queries_dict,
corpus=corpus_dict,
relevant_docs=qrels_dict,
name=human_readable_name,
**ir_evaluator_kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
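# Example usage (a sketch; the model id below is only an assumption):
#   from sentence_transformers import SparseEncoder
#   evaluator = SparseNanoBEIREvaluator(dataset_names=["msmarco", "nfcorpus"])
#   model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
#   results = evaluator(model)
#   print(results[evaluator.primary_metric])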
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator, dataset_name_to_id
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.util import is_datasets_available
if TYPE_CHECKING:
from sentence_transformers.evaluation.NanoBEIREvaluator import (
DatasetNameType,
)
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseNanoBEIREvaluator(NanoBEIREvaluator):
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
        return super().__call__(model, output_path, epoch, steps, *args, **kwargs)
def _load_dataset(
self, dataset_name: DatasetNameType, **ir_evaluator_kwargs
) -> SparseInformationRetrievalEvaluator:
if not is_datasets_available():
raise ValueError(
"datasets is not available. Please install it to use the SparseNanoBEIREvaluator via `pip install datasets`."
)
from datasets import load_dataset
dataset_path = dataset_name_to_id[dataset_name.lower()]
corpus = load_dataset(dataset_path, "corpus", split="train")
queries = load_dataset(dataset_path, "queries", split="train")
qrels = load_dataset(dataset_path, "qrels", split="train")
corpus_dict = {sample["_id"]: sample["text"] for sample in corpus if len(sample["text"]) > 0}
queries_dict = {sample["_id"]: sample["text"] for sample in queries if len(sample["text"]) > 0}
qrels_dict = {}
for sample in qrels:
if sample["query-id"] not in qrels_dict:
qrels_dict[sample["query-id"]] = set()
qrels_dict[sample["query-id"]].add(sample["corpus-id"])
if self.query_prompts is not None:
ir_evaluator_kwargs["query_prompt"] = self.query_prompts.get(dataset_name, None)
if self.corpus_prompts is not None:
ir_evaluator_kwargs["corpus_prompt"] = self.corpus_prompts.get(dataset_name, None)
human_readable_name = self._get_human_readable_name(dataset_name)
return SparseInformationRetrievalEvaluator(
queries=queries_dict,
corpus=corpus_dict,
relevant_docs=qrels_dict,
name=human_readable_name,
**ir_evaluator_kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
"""
The :mod:`jina.proto` module defines the protobuf messages used in Jina. It is the core message protocol for communication between :class:`jina.orchestrate.deployments.Deployment` instances. It also defines the interface of a gRPC service.
"""
|
"""
The :mod:`jina.proto` module defines the protobuf messages used in Jina. It is the core message protocol for communication between :class:`jina.orchestrate.deployments.BaseDeployment` instances. It also defines the interface of a gRPC service.
"""
|
import warnings
from typing import TYPE_CHECKING, Optional, Tuple, TypeVar
from docarray.typing import ImageBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image import ImageNdArray
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from PIL import Image as PILImage
T = TypeVar('T', bound='ImageUrl')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to an image file.
Can be remote (web) URL, or a local file path.
"""
def load_pil(self, timeout: Optional[float] = None) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(self.load_bytes(timeout=timeout)).load_pil()
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> ImageNdArray:
"""
Load the data from the url into an ImageNdArray
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl, ImageNdArray
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: ImageNdArray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def load_bytes(self, timeout: Optional[float] = None) -> ImageBytes:
"""
Convert url to ImageBytes. This will either load or download the file and save
it into an ImageBytes object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: ImageBytes object
"""
bytes_ = super().load_bytes(timeout=timeout)
return ImageBytes(bytes_)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing import ImageBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image import ImageNdArray
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
                f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load_pil(self, timeout: Optional[float] = None) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(self.load_bytes(timeout=timeout)).load_pil()
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> ImageNdArray:
"""
Load the data from the url into an ImageNdArray
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl, ImageNdArray
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: ImageNdArray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def load_bytes(self, timeout: Optional[float] = None) -> ImageBytes:
"""
Convert url to ImageBytes. This will either load or download the file and save
it into an ImageBytes object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: ImageBytes object
"""
bytes_ = super().load_bytes(timeout=timeout)
return ImageBytes(bytes_)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
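# Example of the extension check in `validate` above (a sketch):
#   from pydantic import parse_obj_as
#   parse_obj_as(ImageUrl, 'https://example.com/photo.png')  # passes validation
#   parse_obj_as(ImageUrl, 'https://example.com/notes.txt')  # raises ValueError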
|
from typing import Any, Optional
from typing_inspect import get_args, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
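# Example (a minimal sketch):
#   from typing import Optional, Union
#   from docarray.typing import NdArray
#   assert is_type_tensor(NdArray)
#   assert is_tensor_union(Optional[NdArray])
#   assert not is_tensor_union(Union[NdArray, str])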
|
from typing import Any, Optional
from typing_inspect import get_args, is_optional_type, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def unwrap_optional_type(type_: Any) -> Any:
"""Return the type of an Optional type, e.g. `unwrap_optional(Optional[str]) == str`;
`unwrap_optional(Union[None, int, None]) == int`.
:param type_: the type to unwrap
:return: the "core" type of an Optional type
"""
if not is_optional_type(type_):
return type_
for arg in get_args(type_):
if arg is not type(None):
return arg
|
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the Wikipedia Sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
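# To later load and use the trained model (a sketch):
#   loaded = SentenceTransformer(final_output_dir)
#   embeddings = loaded.encode(["A sentence about a Wikipedia section."])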
|
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the Wikipedia Sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self,
runner: object,
data_batch: Optional[Sequence[BaseDataSample]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Empty cache after an iteration.
Args:
runner (object): The runner of the training process.
data_batch (Sequence[BaseDataSample]): Data from dataloader.
Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner: object) -> None:
"""Empty cache before an epoch.
Args:
runner (object): The runner of the training process.
"""
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner: object) -> None:
"""Empty cache after an epoch.
Args:
runner (object): The runner of the training process.
"""
if self._after_epoch:
torch.cuda.empty_cache()
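# Example registration in an mmengine config (a sketch):
#   custom_hooks = [dict(type='EmptyCacheHook', after_epoch=True)]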
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self,
runner: object,
data_batch: Optional[Sequence[BaseDataSample]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Empty cache after an iteration.
Args:
runner (object): The runner of the training process.
data_batch (Sequence[BaseDataSample]): Data from dataloader.
Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner: object) -> None:
"""Empty cache before an epoch.
Args:
runner (object): The runner of the training process.
"""
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner: object) -> None:
"""Empty cache after an epoch.
Args:
runner (object): The runner of the training process.
"""
if self._after_epoch:
torch.cuda.empty_cache()
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_color_space_image_tensor,
convert_color_space_image_pil,
convert_color_space_video,
convert_color_space,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, rgb_to_grayscale, to_grayscale, to_tensor # usort: skip
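# Example usage of one of the kernels above (a sketch; the import path of this
# module depends on the torchvision version it ships in):
#   import torch
#   img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
#   flipped = horizontal_flip_image_tensor(img)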
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_color_space_image_tensor,
convert_color_space_image_pil,
convert_color_space_video,
convert_color_space,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import (
decode_image_with_pil,
decode_video_with_av,
pil_to_tensor,
to_image_pil,
to_image_tensor,
to_pil_image,
)
from ._deprecated import get_image_size, rgb_to_grayscale, to_grayscale, to_tensor # usort: skip
|
import numpy as np
import pytest
from typing import Dict, List
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].hello == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
@pytest.mark.proto
def test_union_type_error():
from typing import Union
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_protobuf(docs.to_protobuf())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_protobuf(docs_basic.to_protobuf())
assert docs_copy == docs_basic
class MySimpleDoc(BaseDoc):
title: str
class MyComplexDoc(BaseDoc):
content_dict_doclist: Dict[str, DocList[MySimpleDoc]]
content_dict_list: Dict[str, List[MySimpleDoc]]
aux_dict: Dict[str, int]
def test_to_from_proto_complex():
da = DocList[MyComplexDoc](
[
MyComplexDoc(
content_dict_doclist={
'test1': DocList[MySimpleDoc](
[MySimpleDoc(title='123'), MySimpleDoc(title='456')]
)
},
content_dict_list={
'test1': [MySimpleDoc(title='123'), MySimpleDoc(title='456')]
},
aux_dict={'a': 0},
)
]
)
da2 = DocList[MyComplexDoc].from_protobuf(da.to_protobuf())
assert len(da2) == 1
d2 = da2[0]
assert d2.aux_dict == {'a': 0}
assert len(d2.content_dict_doclist['test1']) == 2
assert d2.content_dict_doclist['test1'][0].title == '123'
assert d2.content_dict_doclist['test1'][1].title == '456'
assert len(d2.content_dict_list['test1']) == 2
assert d2.content_dict_list['test1'][0].title == '123'
assert d2.content_dict_list['test1'][1].title == '456'
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].hello == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
@pytest.mark.proto
def test_union_type_error():
from typing import Union
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_protobuf(docs.to_protobuf())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_protobuf(docs_basic.to_protobuf())
assert docs_copy == docs_basic
|
from typing import Any, Union
import pytest
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.structured_query import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers import SelfQueryRetriever
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
from tests.unit_tests.llms.fake_llm import FakeLLM
class FakeTranslator(Visitor):
allowed_comparators = (
Comparator.EQ,
Comparator.NE,
Comparator.LT,
Comparator.LTE,
Comparator.GT,
Comparator.GTE,
Comparator.CONTAIN,
Comparator.LIKE,
)
allowed_operators = (Operator.AND, Operator.OR, Operator.NOT)
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
return f"${func.value}"
def visit_operation(self, operation: Operation) -> dict:
args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args}
def visit_comparison(self, comparison: Comparison) -> dict:
return {
comparison.attribute: {
self._format_func(comparison.comparator): comparison.value
}
}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"filter": structured_query.filter.accept(self)}
return structured_query.query, kwargs
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> list[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
@pytest.fixture()
def fake_llm() -> FakeLLM:
return FakeLLM(
queries={
"1": """```json
{
"query": "test",
"filter": null
}
```""",
"bar": "baz",
},
sequential_responses=True,
)
@pytest.fixture()
def fake_vectorstore() -> InMemoryVectorstoreWithSearch:
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(
[
Document(
page_content="test",
metadata={
"foo": "bar",
},
),
],
ids=["test"],
)
return vectorstore
@pytest.fixture()
def fake_self_query_retriever(
fake_llm: FakeLLM, fake_vectorstore: InMemoryVectorstoreWithSearch
) -> SelfQueryRetriever:
return SelfQueryRetriever.from_llm(
llm=fake_llm,
vectorstore=fake_vectorstore,
document_contents="test",
metadata_field_info=[
AttributeInfo(
name="foo",
type="string",
description="test",
),
],
structured_query_translator=FakeTranslator(),
)
def test__get_relevant_documents(fake_self_query_retriever: SelfQueryRetriever) -> None:
relevant_documents = fake_self_query_retriever._get_relevant_documents(
"foo",
run_manager=CallbackManagerForRetrieverRun.get_noop_manager(),
)
assert len(relevant_documents) == 1
assert relevant_documents[0].metadata["foo"] == "bar"
async def test__aget_relevant_documents(
fake_self_query_retriever: SelfQueryRetriever,
) -> None:
relevant_documents = await fake_self_query_retriever._aget_relevant_documents(
"foo",
run_manager=AsyncCallbackManagerForRetrieverRun.get_noop_manager(),
)
assert len(relevant_documents) == 1
assert relevant_documents[0].metadata["foo"] == "bar"
|
from typing import Any, Dict, List, Tuple, Union
import pytest
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.structured_query import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers import SelfQueryRetriever
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
from tests.unit_tests.llms.fake_llm import FakeLLM
class FakeTranslator(Visitor):
allowed_comparators = (
Comparator.EQ,
Comparator.NE,
Comparator.LT,
Comparator.LTE,
Comparator.GT,
Comparator.GTE,
Comparator.CONTAIN,
Comparator.LIKE,
)
allowed_operators = (Operator.AND, Operator.OR, Operator.NOT)
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
return f"${func.value}"
def visit_operation(self, operation: Operation) -> Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args}
def visit_comparison(self, comparison: Comparison) -> Dict:
return {
comparison.attribute: {
self._format_func(comparison.comparator): comparison.value
}
}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"filter": structured_query.filter.accept(self)}
return structured_query.query, kwargs
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
@pytest.fixture()
def fake_llm() -> FakeLLM:
return FakeLLM(
queries={
"1": """```json
{
"query": "test",
"filter": null
}
```""",
"bar": "baz",
},
sequential_responses=True,
)
@pytest.fixture()
def fake_vectorstore() -> InMemoryVectorstoreWithSearch:
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(
[
Document(
page_content="test",
metadata={
"foo": "bar",
},
),
],
ids=["test"],
)
return vectorstore
@pytest.fixture()
def fake_self_query_retriever(
fake_llm: FakeLLM, fake_vectorstore: InMemoryVectorstoreWithSearch
) -> SelfQueryRetriever:
return SelfQueryRetriever.from_llm(
llm=fake_llm,
vectorstore=fake_vectorstore,
document_contents="test",
metadata_field_info=[
AttributeInfo(
name="foo",
type="string",
description="test",
),
],
structured_query_translator=FakeTranslator(),
)
def test__get_relevant_documents(fake_self_query_retriever: SelfQueryRetriever) -> None:
relevant_documents = fake_self_query_retriever._get_relevant_documents(
"foo",
run_manager=CallbackManagerForRetrieverRun.get_noop_manager(),
)
assert len(relevant_documents) == 1
assert relevant_documents[0].metadata["foo"] == "bar"
async def test__aget_relevant_documents(
fake_self_query_retriever: SelfQueryRetriever,
) -> None:
relevant_documents = await fake_self_query_retriever._aget_relevant_documents(
"foo",
run_manager=AsyncCallbackManagerForRetrieverRun.get_noop_manager(),
)
assert len(relevant_documents) == 1
assert relevant_documents[0].metadata["foo"] == "bar"
|
_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
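# Note (a sketch of the matching behavior): a parameter named
# 'backbone.stages.0.blocks.0.norm.weight' matches the
# 'backbone.stages.0.blocks.0.norm' key above and is therefore trained with
# lr_mult=0.1 and decay_mult=0.0.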
|
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
        from docarray.documents import VideoDoc
        # use it directly
        vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
        class MyVideo(VideoDoc):
            name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
        video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
        video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDocument):
            video: VideoDoc
            text: TextDoc
mmdoc = MultiModalDoc(
            video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
            text=TextDoc(text='hello world, how are you doing?'),
)
        mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
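# A minimal usage sketch (assuming the docarray v2 API imported above) of how
# `validate` coerces raw values into a VideoDoc; the URL is a hypothetical example:
#
#     doc = VideoDoc.validate('https://example.com/mov_bbb.mp4')  # -> VideoDoc(url=...)
#     doc = VideoDoc.validate(np.zeros((10, 96, 96, 3)))          # -> VideoDoc(tensor=...)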
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
        from docarray.documents import VideoDoc
        # use it directly
        vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
        class MyVideo(VideoDoc):
            name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
        video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
        video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDocument):
            video: VideoDoc
            text: TextDoc
mmdoc = MultiModalDoc(
            video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
            text=TextDoc(text='hello world, how are you doing?'),
)
        mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
"""Test Aleph Alpha specific stuff."""
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.aleph_alpha import AlephAlpha
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_is_secret_string() -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type]
assert isinstance(llm.aleph_alpha_api_key, SecretStr)
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type]
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
monkeypatch.setenv("ALEPH_ALPHA_API_KEY", "secret-api-key")
llm = AlephAlpha() # type: ignore[call-arg]
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
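# Note: pydantic's SecretStr masks the value in str()/repr(), which is why the
# assertions above expect "**********"; the raw key is only recoverable via
# llm.aleph_alpha_api_key.get_secret_value().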
|
"""Test Aleph Alpha specific stuff."""
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.aleph_alpha import AlephAlpha
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_is_secret_string() -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore
assert isinstance(llm.aleph_alpha_api_key, SecretStr)
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
monkeypatch.setenv("ALEPH_ALPHA_API_KEY", "secret-api-key")
llm = AlephAlpha() # type: ignore[call-arg]
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
MODULE2PACKAGE = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
'mmyolo': 'mmyolo',
'mmpretrain': 'mmpretrain',
}
# PKG2PROJECT is not a proper name to represent the mapping between module name
# (module import from) and package name (used by pip install). Therefore,
# PKG2PROJECT will be deprecated and this alias will only be kept until
# MMEngine v1.0.0
PKG2PROJECT = MODULE2PACKAGE
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
                warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
            '`_get_package_and_cfg_path` is used to get the external package, '
'please specify the package name and relative config path, just '
'like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in MODULE2PACKAGE, (
        f'mmengine does not support loading {package} config.')
package = MODULE2PACKAGE[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
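# A minimal sketch (hypothetical values) of the helpers above:
#
#     >>> _get_package_and_cfg_path('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
#     ('mmdet', 'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
#
#     >>> tree = ast.parse("_base_ = ['a.py']\nlr = 0.1")
#     >>> tree = RemoveAssignFromAST('_base_').visit(tree)
#     >>> # only the `lr = 0.1` assignment remains in the transformed tree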
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
MODULE2PACKAGE = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
'mmyolo': 'mmyolo',
}
# PKG2PROJECT is not a proper name to represent the mapping between module name
# (module import from) and package name (used by pip install). Therefore,
# PKG2PROJECT will be deprecated and this alias will only be kept until
# MMEngine v1.0.0
PKG2PROJECT = MODULE2PACKAGE
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
                warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
            '`_get_package_and_cfg_path` is used to get the external package, '
'please specify the package name and relative config path, just '
'like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in MODULE2PACKAGE, (
        f'mmengine does not support loading {package} config.')
package = MODULE2PACKAGE[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
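# Note: when `enable=True`, the runner applies the linear scaling rule and
# multiplies lr by (actual total batch size) / base_batch_size, e.g. training
# on 4 GPUs x 2 samples per GPU would use lr = 0.02 * 8 / 16 = 0.01.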
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
"""Module wrapping the Client of Jina."""
import argparse
from typing import TYPE_CHECKING, Optional, Union, overload
from jina.helper import parse_client
__all__ = ['Client']
from jina.enums import GatewayProtocolType
if TYPE_CHECKING:
from jina.clients.grpc import AsyncGRPCClient, GRPCClient
from jina.clients.http import AsyncHTTPClient, HTTPClient
from jina.clients.websocket import AsyncWebSocketClient, WebSocketClient
# overload_inject_start_client
@overload
def Client(
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
tls: Optional[bool] = False,
**kwargs
) -> Union[
'AsyncWebSocketClient',
'WebSocketClient',
'AsyncGRPCClient',
'GRPCClient',
'HTTPClient',
'AsyncHTTPClient',
]:
"""Create a Client. Client is how user interact with Flow
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
    :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset these proxy variables before starting. gRPC seems to prefer no proxy.
:param tls: If set, connect to gateway using tls encryption
:return: the new Client object
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client
def Client(
args: Optional['argparse.Namespace'] = None, **kwargs
) -> Union[
'AsyncWebSocketClient',
'WebSocketClient',
'AsyncGRPCClient',
'GRPCClient',
'HTTPClient',
'AsyncHTTPClient',
]:
# implementation_stub_inject_start_client
"""Convenience function that returns client instance for given protocol.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select protocol from 'grpc', 'http', or 'websocket'; default is 'grpc'
        # select asyncio True or False; default is False
# select host address to connect to
c = Client(
protocol='grpc', asyncio=False, host='grpc://my.awesome.flow:1234'
) # returns GRPCClient instance
c.post(on='/index', inputs=Document(text='hello!'))
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
    :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset these proxy variables before starting. gRPC seems to prefer no proxy.
:param tls: If set, connect to gateway using tls encryption
:return: the new Client object
.. # noqa: DAR102
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# implementation_stub_inject_end_client
if not (
args and isinstance(args, argparse.Namespace)
    ):  # we need to parse the kwargs as soon as possible in order to get the gateway type
args = parse_client(kwargs)
protocol = (
args.protocol if args else kwargs.get('protocol', GatewayProtocolType.GRPC)
)
if isinstance(protocol, str):
protocol = GatewayProtocolType.from_string(protocol)
is_async = (args and args.asyncio) or kwargs.get('asyncio', False)
if protocol == GatewayProtocolType.GRPC:
if is_async:
from jina.clients.grpc import AsyncGRPCClient
return AsyncGRPCClient(args, **kwargs)
else:
from jina.clients.grpc import GRPCClient
return GRPCClient(args, **kwargs)
elif protocol == GatewayProtocolType.WEBSOCKET:
if is_async:
from jina.clients.websocket import AsyncWebSocketClient
return AsyncWebSocketClient(args, **kwargs)
else:
from jina.clients.websocket import WebSocketClient
return WebSocketClient(args, **kwargs)
elif protocol == GatewayProtocolType.HTTP:
if is_async:
from jina.clients.http import AsyncHTTPClient
return AsyncHTTPClient(args, **kwargs)
else:
from jina.clients.http import HTTPClient
return HTTPClient(args, **kwargs)
|
"""Module wrapping the Client of Jina."""
import argparse
from typing import TYPE_CHECKING, Optional, Union, overload
from jina.helper import parse_client
__all__ = ['Client']
from jina.enums import GatewayProtocolType
if TYPE_CHECKING:
from jina.clients.grpc import AsyncGRPCClient, GRPCClient
from jina.clients.http import AsyncHTTPClient, HTTPClient
from jina.clients.websocket import AsyncWebSocketClient, WebSocketClient
# overload_inject_start_client
@overload
def Client(
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
return_responses: Optional[bool] = False,
tls: Optional[bool] = False,
**kwargs
) -> Union[
'AsyncWebSocketClient',
'WebSocketClient',
'AsyncGRPCClient',
'GRPCClient',
'HTTPClient',
'AsyncHTTPClient',
]:
"""Create a Client. Client is how user interact with Flow
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
    :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset these proxy variables before starting. gRPC seems to prefer no proxy.
:param return_responses: If set, return results as List of Requests instead of a reduced DocArray.
:param tls: If set, connect to gateway using tls encryption
:return: the new Client object
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client
def Client(
args: Optional['argparse.Namespace'] = None, **kwargs
) -> Union[
'AsyncWebSocketClient',
'WebSocketClient',
'AsyncGRPCClient',
'GRPCClient',
'HTTPClient',
'AsyncHTTPClient',
]:
# implementation_stub_inject_start_client
"""Convenience function that returns client instance for given protocol.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select protocol from 'grpc', 'http', or 'websocket'; default is 'grpc'
        # select asyncio True or False; default is False
# select host address to connect to
c = Client(
protocol='grpc', asyncio=False, host='grpc://my.awesome.flow:1234'
) # returns GRPCClient instance
c.post(on='/index', inputs=Document(text='hello!'))
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
    :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset these proxy variables before starting. gRPC seems to prefer no proxy.
:param return_responses: If set, return results as List of Requests instead of a reduced DocArray.
:param tls: If set, connect to gateway using tls encryption
:return: the new Client object
.. # noqa: DAR102
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# implementation_stub_inject_end_client
if not (
args and isinstance(args, argparse.Namespace)
    ):  # we need to parse the kwargs as soon as possible in order to get the gateway type
args = parse_client(kwargs)
protocol = (
args.protocol if args else kwargs.get('protocol', GatewayProtocolType.GRPC)
)
if isinstance(protocol, str):
protocol = GatewayProtocolType.from_string(protocol)
is_async = (args and args.asyncio) or kwargs.get('asyncio', False)
if protocol == GatewayProtocolType.GRPC:
if is_async:
from jina.clients.grpc import AsyncGRPCClient
return AsyncGRPCClient(args, **kwargs)
else:
from jina.clients.grpc import GRPCClient
return GRPCClient(args, **kwargs)
elif protocol == GatewayProtocolType.WEBSOCKET:
if is_async:
from jina.clients.websocket import AsyncWebSocketClient
return AsyncWebSocketClient(args, **kwargs)
else:
from jina.clients.websocket import WebSocketClient
return WebSocketClient(args, **kwargs)
elif protocol == GatewayProtocolType.HTTP:
if is_async:
from jina.clients.http import AsyncHTTPClient
return AsyncHTTPClient(args, **kwargs)
else:
from jina.clients.http import HTTPClient
return HTTPClient(args, **kwargs)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.utils import summary_utils
class SummaryUtilsTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters([("adam",), (None,)])
@pytest.mark.requires_trainable_backend
def test_print_model_summary(self, optimizer):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = models.Model(inputs, outputs)
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
if optimizer:
# Trigger the optimizer weights creation
model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
try:
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
if optimizer:
self.assertIn("Total params: 29", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertIn("Optimizer params: 20", summary_content)
else:
self.assertIn("Total params: 9", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertNotIn("Optimizer params", summary_content)
except ImportError:
pass
def test_print_model_summary_custom_build(self):
class MyModel(models.Model):
def __init__(self):
super().__init__()
self.dense1 = layers.Dense(4, activation="relu")
self.dense2 = layers.Dense(2, activation="softmax")
self.unbuilt_dense = layers.Dense(1)
def build(self, input_shape):
self.dense1.build(input_shape)
input_shape = self.dense1.compute_output_shape(input_shape)
self.dense2.build(input_shape)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
model.build((None, 2))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense1
self.assertIn("(None, 2)", summary_content) # dense2
self.assertIn("?", summary_content) # unbuilt_dense
self.assertIn("Total params: 22", summary_content)
self.assertIn("Trainable params: 22", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.utils import summary_utils
class SummaryUtilsTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters([("adam",), (None,)])
@pytest.mark.requires_trainable_backend
def test_print_model_summary(self, optimizer):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = models.Model(inputs, outputs)
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
if optimizer:
# Trigger the optimizer weights creation
model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
try:
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
if optimizer:
self.assertIn("Total params: 29", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertIn("Optimizer params: 20", summary_content)
else:
self.assertIn("Total params: 9", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertNotIn("Optimizer params", summary_content)
except ImportError:
pass
|
"""Standard LangChain interface tests"""
from typing import Optional
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
# Initialize the rate limiter in global scope, so it can be re-used
# across tests.
rate_limiter = InMemoryRateLimiter(
requests_per_second=0.5,
)
class TestXAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
return {
"model": "grok-3",
"rate_limiter": rate_limiter,
"stream_usage": True,
}
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatXAI(
model="grok-3-mini-beta",
reasoning_effort="low",
)
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
# Test streaming
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
"""Standard LangChain interface tests"""
import pytest # type: ignore[import-not-found]
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
# Initialize the rate limiter in global scope, so it can be re-used
# across tests.
rate_limiter = InMemoryRateLimiter(
requests_per_second=0.5,
)
class TestXAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
return {
"model": "grok-2",
"rate_limiter": rate_limiter,
}
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
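# Example (hypothetical test) consuming the fixtures above:
#
#     def test_flow_on_free_port(port_generator):
#         port = port_generator()  # random port, never repeated by this generator
#         assert isinstance(port, int)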
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instance
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
est = _construct_instance(est_class)
if est.__sklearn_tags__().input_tags.allow_nan:
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
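# Usage sketch: once this module is registered as a Sphinx extension (via the
# `setup` hook above), the directive can be invoked from any .rst page as:
#
#     .. allow_nan_estimators::
#
# which renders a bullet list of NaN-tolerant estimators per estimator type.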
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instance
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
est = _construct_instance(est_class)
if est._get_tags().get("allow_nan"):
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
import os
from pathlib import Path
import numpy as np
import pytest
from PIL.Image import Image, fromarray
from jina import DocumentArray, Document, Executor
from ...normalizer import ImageNormalizer
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8')
im = fromarray(blob)
uri = os.path.join(tmpdir, 'tmp.png')
im.save(uri)
return uri
@pytest.fixture
def test_image_uri_doc(numpy_image_uri):
return Document(uri=numpy_image_uri)
@pytest.fixture
def test_image_buffer_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_uri_to_buffer()
return doc
@pytest.fixture
def test_image_blob_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_image_uri_to_blob()
return doc
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.target_size == 224
def test_initialization():
norm = ImageNormalizer()
assert norm.target_size == 224
norm = ImageNormalizer(
target_size=96,
img_mean=(1.0, 2.0, 3.0),
img_std=(2.0, 2.0, 2.0),
resize_dim=256,
channel_axis=4,
target_channel_axis=5,
target_dtype=np.uint8,
)
assert norm.target_size == 96
assert np.array_equal(norm.img_std, [[[2, 2, 2]]])
assert np.array_equal(norm.img_mean, [[[1, 2, 3]]])
assert norm.resize_dim == 256
assert norm.channel_axis == 4
assert norm.target_channel_axis == 5
assert norm.target_dtype == np.uint8
def test_convert_image_to_blob(
test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc
):
norm = ImageNormalizer(
resize_dim=123, img_mean=(0.1, 0.1, 0.1), img_std=(0.5, 0.5, 0.5)
)
docs = DocumentArray(
[test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc]
)
assert docs[0].blob is None and docs[1].blob is None
for doc in docs:
norm._convert_image_to_blob(doc)
assert len(docs) == 3
for doc in docs:
assert np.array_equal(doc.blob, test_image_blob_doc.blob)
@pytest.mark.parametrize('dtype_conversion', [np.uint8, np.float32, np.float64])
@pytest.mark.parametrize('manual_convert', [True, False])
@pytest.mark.parametrize('default_traversal_paths', [(('r'),), (('c'),)])
def test_crafting_image(test_image_uri_doc, manual_convert, dtype_conversion, default_traversal_paths):
doc = Document(test_image_uri_doc, copy=True)
doc.chunks.append(Document(test_image_uri_doc, copy=True))
doc.convert_image_uri_to_blob()
doc.chunks[0].convert_image_uri_to_blob()
norm = ImageNormalizer(
resize_dim=123,
img_mean=(0.1, 0.1, 0.1),
img_std=(0.5, 0.5, 0.5),
target_dtype=dtype_conversion,
default_traversal_paths=default_traversal_paths
)
assert norm.target_dtype == dtype_conversion
img = norm._load_image(doc.blob)
assert isinstance(img, Image)
assert img.size == (96, 96)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 123)
assert isinstance(img_resized, Image)
norm.resize_dim = (123, 456)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 456)
assert isinstance(img_resized, Image)
with pytest.raises(ValueError):
norm.resize_dim = (1, 2, 3)
norm._resize_short(img)
norm.resize_dim = 256
img = norm._resize_short(img)
norm.target_size = 128
cropped_img, b1, b2 = norm._crop_image(img, how='random')
assert cropped_img.size == (128, 128)
assert isinstance(cropped_img, Image)
norm.target_size = 224
img, b1, b2 = norm._crop_image(img, how='center')
assert img.size == (224, 224)
assert isinstance(img, Image)
assert b1 == 16
assert b2 == 16
img = np.asarray(img).astype('float32') / 255
norm_img = norm._normalize(norm._load_image(doc.blob))
img -= np.array([[[0.1, 0.1, 0.1]]])
img /= np.array([[[0.5, 0.5, 0.5]]])
assert np.array_equal(norm_img, img)
if manual_convert:
docs = DocumentArray([doc])
else:
docs = DocumentArray([test_image_uri_doc])
docs[0].chunks.extend(DocumentArray([test_image_uri_doc]))
processed_docs = norm.craft(docs, parameters={}).traverse_flat(default_traversal_paths)
assert np.array_equal(processed_docs[0].blob, img.astype(dtype_conversion))
for doc in processed_docs:
assert doc.blob.dtype == dtype_conversion
def test_move_channel_axis(test_image_uri_doc):
norm = ImageNormalizer(channel_axis=2, target_channel_axis=0)
doc = test_image_uri_doc
doc.convert_image_uri_to_blob()
img = norm._load_image(doc.blob)
assert img.size == (96, 96)
channel0_img = norm._move_channel_axis(doc.blob, 2, 0)
assert channel0_img.shape == (3, 96, 96)
processed_docs = norm.craft(DocumentArray([doc]), parameters={})
assert processed_docs[0].blob.shape == (3, 224, 224)
|
import os
from pathlib import Path
import numpy as np
import pytest
from PIL.Image import Image, fromarray
from jina import DocumentArray, Document, Executor
from ...normalizer import ImageNormalizer
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8')
im = fromarray(blob)
uri = os.path.join(tmpdir, 'tmp.png')
im.save(uri)
return uri
@pytest.fixture
def test_image_uri_doc(numpy_image_uri):
return Document(uri=numpy_image_uri)
@pytest.fixture
def test_image_buffer_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_uri_to_buffer()
return doc
@pytest.fixture
def test_image_blob_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_image_uri_to_blob()
return doc
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.target_size == 224
def test_initialization():
norm = ImageNormalizer()
assert norm.target_size == 224
norm = ImageNormalizer(
target_size=96,
img_mean=(1.0, 2.0, 3.0),
img_std=(2.0, 2.0, 2.0),
resize_dim=256,
channel_axis=4,
target_channel_axis=5,
target_dtype=np.uint8,
)
assert norm.target_size == 96
assert np.array_equal(norm.img_std, [[[2, 2, 2]]])
assert np.array_equal(norm.img_mean, [[[1, 2, 3]]])
assert norm.resize_dim == 256
assert norm.channel_axis == 4
assert norm.target_channel_axis == 5
assert norm.target_dtype == np.uint8
def test_convert_image_to_blob(
test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc
):
norm = ImageNormalizer(
resize_dim=123, img_mean=(0.1, 0.1, 0.1), img_std=(0.5, 0.5, 0.5)
)
docs = DocumentArray(
[test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc]
)
assert docs[0].blob is None and docs[1].blob is None
for doc in docs:
norm._convert_image_to_blob(doc)
assert len(docs) == 3
for doc in docs:
assert np.array_equal(doc.blob, test_image_blob_doc.blob)
@pytest.mark.parametrize('dtype_conversion', [np.uint8, np.float32, np.float64])
@pytest.mark.parametrize('manual_convert', [True, False])
def test_crafting_image(test_image_uri_doc, manual_convert, dtype_conversion):
doc = Document(test_image_uri_doc, copy=True)
doc.convert_image_uri_to_blob()
norm = ImageNormalizer(
resize_dim=123,
img_mean=(0.1, 0.1, 0.1),
img_std=(0.5, 0.5, 0.5),
target_dtype=dtype_conversion,
)
assert norm.target_dtype == dtype_conversion
img = norm._load_image(doc.blob)
assert isinstance(img, Image)
assert img.size == (96, 96)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 123)
assert isinstance(img_resized, Image)
norm.resize_dim = (123, 456)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 456)
assert isinstance(img_resized, Image)
with pytest.raises(ValueError):
norm.resize_dim = (1, 2, 3)
norm._resize_short(img)
norm.resize_dim = 256
img = norm._resize_short(img)
norm.target_size = 128
cropped_img, b1, b2 = norm._crop_image(img, how='random')
assert cropped_img.size == (128, 128)
assert isinstance(cropped_img, Image)
norm.target_size = 224
img, b1, b2 = norm._crop_image(img, how='center')
assert img.size == (224, 224)
assert isinstance(img, Image)
assert b1 == 16
assert b2 == 16
img = np.asarray(img).astype('float32') / 255
norm_img = norm._normalize(norm._load_image(doc.blob))
img -= np.array([[[0.1, 0.1, 0.1]]])
img /= np.array([[[0.5, 0.5, 0.5]]])
assert np.array_equal(norm_img, img)
if manual_convert:
docs = DocumentArray([doc])
else:
docs = DocumentArray([test_image_uri_doc])
processed_docs = norm.craft(docs)
assert np.array_equal(processed_docs[0].blob, img.astype(dtype_conversion))
for doc in processed_docs:
assert doc.blob.dtype == dtype_conversion
def test_move_channel_axis(test_image_uri_doc):
norm = ImageNormalizer(channel_axis=2, target_channel_axis=0)
doc = test_image_uri_doc
doc.convert_image_uri_to_blob()
img = norm._load_image(doc.blob)
assert img.size == (96, 96)
channel0_img = norm._move_channel_axis(doc.blob, 2, 0)
assert channel0_img.shape == (3, 96, 96)
processed_docs = norm.craft(DocumentArray([doc]))
assert processed_docs[0].blob.shape == (3, 224, 224)
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
"""Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"],
response.get("action_input", {}),
text,
)
except Exception as exc:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from exc
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "chat"
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
"""Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception as exc:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from exc
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "chat"
|
from abc import ABC, abstractmethod
from typing import Any, ClassVar, Dict, List, Optional
from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
class RetrievalMetricResult(BaseModel):
"""
Metric result.
Attributes:
score (float): Score for the metric
metadata (Dict[str, Any]): Metadata for the metric result
"""
score: float = Field(..., description="Score for the metric")
metadata: Dict[str, Any] = Field(
default_factory=dict, description="Metadata for the metric result"
)
def __str__(self) -> str:
"""String representation."""
return f"Score: {self.score}\nMetadata: {self.metadata}"
def __float__(self) -> float:
"""Float representation."""
return self.score
class BaseRetrievalMetric(BaseModel, ABC):
"""Base class for retrieval metrics."""
model_config = ConfigDict(arbitrary_types_allowed=True)
metric_name: ClassVar[str]
@abstractmethod
def compute(
self,
query: Optional[str] = None,
expected_ids: Optional[List[str]] = None,
retrieved_ids: Optional[List[str]] = None,
expected_texts: Optional[List[str]] = None,
retrieved_texts: Optional[List[str]] = None,
**kwargs: Any,
) -> RetrievalMetricResult:
"""
Compute metric.
Args:
query (Optional[str]): Query string
expected_ids (Optional[List[str]]): Expected ids
retrieved_ids (Optional[List[str]]): Retrieved ids
**kwargs: Additional keyword arguments
"""
|
from abc import ABC, abstractmethod
from typing import Any, ClassVar, Dict, List, Optional
from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
class RetrievalMetricResult(BaseModel):
"""Metric result.
Attributes:
score (float): Score for the metric
metadata (Dict[str, Any]): Metadata for the metric result
"""
score: float = Field(..., description="Score for the metric")
metadata: Dict[str, Any] = Field(
default_factory=dict, description="Metadata for the metric result"
)
def __str__(self) -> str:
"""String representation."""
return f"Score: {self.score}\nMetadata: {self.metadata}"
def __float__(self) -> float:
"""Float representation."""
return self.score
class BaseRetrievalMetric(BaseModel, ABC):
"""Base class for retrieval metrics."""
model_config = ConfigDict(arbitrary_types_allowed=True)
metric_name: ClassVar[str]
@abstractmethod
def compute(
self,
query: Optional[str] = None,
expected_ids: Optional[List[str]] = None,
retrieved_ids: Optional[List[str]] = None,
expected_texts: Optional[List[str]] = None,
retrieved_texts: Optional[List[str]] = None,
**kwargs: Any,
) -> RetrievalMetricResult:
"""Compute metric.
Args:
query (Optional[str]): Query string
expected_ids (Optional[List[str]]): Expected ids
retrieved_ids (Optional[List[str]]): Retrieved ids
**kwargs: Additional keyword arguments
"""
|
"""Copyright 2019-2024, XGBoost contributors"""
import os
from typing import Generator
import numpy as np
import pytest
import scipy.sparse
from dask import dataframe as dd
from distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost import testing as tm
from xgboost.testing import dask as dtm
@pytest.fixture(scope="module")
def cluster() -> Generator:
n_threads = os.cpu_count()
assert n_threads is not None
with LocalCluster(
n_workers=2, threads_per_worker=n_threads // 2, dashboard_address=":0"
) as dask_cluster:
yield dask_cluster
@pytest.fixture
def client(cluster: LocalCluster) -> Generator:
with Client(cluster) as dask_client:
yield dask_client
def test_dask_ranking(client: Client) -> None:
dpath = "demo/"
mq2008 = tm.data.get_mq2008(dpath)
data = []
for d in mq2008:
if isinstance(d, scipy.sparse.csr_matrix):
d[d == 0] = np.inf
d = d.toarray()
d[d == 0] = np.nan
d[np.isinf(d)] = 0
data.append(dd.from_array(d, chunksize=32))
else:
data.append(dd.from_array(d, chunksize=32))
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = data
qid_train = qid_train.astype(np.uint32)
qid_valid = qid_valid.astype(np.uint32)
qid_test = qid_test.astype(np.uint32)
rank = dxgb.DaskXGBRanker(
n_estimators=2500,
eval_metric=["ndcg"],
early_stopping_rounds=10,
allow_group_split=True,
)
rank.fit(
x_train,
y_train,
qid=qid_train,
eval_set=[(x_test, y_test), (x_train, y_train)],
eval_qid=[qid_test, qid_train],
verbose=True,
)
assert rank.n_features_in_ == 46
assert rank.best_score > 0.98
@pytest.mark.filterwarnings("error")
def test_no_group_split(client: Client) -> None:
dtm.check_no_group_split(client, "cpu")
|
"""Copyright 2019-2024, XGBoost contributors"""
import os
from typing import Generator
import numpy as np
import pytest
import scipy.sparse
from dask import dataframe as dd
from distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost import testing as tm
@pytest.fixture(scope="module")
def cluster() -> Generator:
n_threads = os.cpu_count()
assert n_threads is not None
with LocalCluster(
n_workers=2, threads_per_worker=n_threads // 2, dashboard_address=":0"
) as dask_cluster:
yield dask_cluster
@pytest.fixture
def client(cluster: LocalCluster) -> Generator:
with Client(cluster) as dask_client:
yield dask_client
def test_dask_ranking(client: Client) -> None:
dpath = "demo/"
mq2008 = tm.data.get_mq2008(dpath)
data = []
for d in mq2008:
if isinstance(d, scipy.sparse.csr_matrix):
d[d == 0] = np.inf
d = d.toarray()
d[d == 0] = np.nan
d[np.isinf(d)] = 0
data.append(dd.from_array(d, chunksize=32))
else:
data.append(dd.from_array(d, chunksize=32))
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = data
qid_train = qid_train.astype(np.uint32)
qid_valid = qid_valid.astype(np.uint32)
qid_test = qid_test.astype(np.uint32)
rank = dxgb.DaskXGBRanker(
n_estimators=2500, eval_metric=["ndcg"], early_stopping_rounds=10
)
rank.fit(
x_train,
y_train,
qid=qid_train,
eval_set=[(x_test, y_test), (x_train, y_train)],
eval_qid=[qid_test, qid_train],
verbose=True,
)
assert rank.n_features_in_ == 46
assert rank.best_score > 0.98
|
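The zero-swapping in `test_dask_ranking` above exists because densifying a CSR matrix makes implicit (absent, i.e. missing) entries indistinguishable from explicitly stored zero feature values. A small sketch of that distinction, expressed directly on the CSR `data` array (an unambiguous rendering of the apparent intent, not the test's exact code path):

import numpy as np
import scipy.sparse

# One explicitly stored zero at (0, 0); (1, 1) is implicit (never stored).
d = scipy.sparse.csr_matrix(
    (np.array([0.0, 1.0, 2.0]), (np.array([0, 0, 1]), np.array([0, 1, 0]))),
    shape=(2, 2),
)
d.data[d.data == 0] = np.inf  # tag stored zeros so densifying cannot erase them
dense = d.toarray()
dense[dense == 0] = np.nan    # positions that were never stored become missing
dense[np.isinf(dense)] = 0.0  # restore the stored zeros
print(dense)                  # [[ 0.  1.] [ 2. nan]]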
from docarray import BaseDoc
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(BaseDoc):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
|
from docarray import BaseDocument
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(BaseDocument):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
|
"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
def _get_client(
api_key: Optional[str] = None,
api_url: Optional[str] = None,
) -> Any:
try:
from langsmith import Client as LangSmithClient
ls_client = LangSmithClient(api_url, api_key=api_key)
if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"):
return ls_client
else:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError:
try:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError as e:
msg = (
"Could not import langsmith or langchainhub (deprecated),"
"please install with `pip install langsmith`."
)
raise ImportError(msg) from e
def push(
repo_full_name: str,
object: Any, # noqa: A002
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = None,
new_repo_is_public: bool = False,
new_repo_description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
) -> str:
"""
Push an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the prompt to push to in the format of
`owner/prompt_name` or `prompt_name`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the prompt should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the prompt. Defaults to an empty
string.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "push_prompt"):
return client.push_prompt(
repo_full_name,
object=object,
parent_commit_hash=parent_commit_hash,
is_public=new_repo_is_public,
description=new_repo_description,
readme=readme,
tags=tags,
)
# Then it's langchainhub
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
include_model: Optional[bool] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pull an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the prompt to pull from in the format of
`owner/prompt_name:commit_hash` or `owner/prompt_name`
or just `prompt_name` if it's your own prompt.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "pull_prompt"):
response = client.pull_prompt(owner_repo_commit, include_model=include_model)
return response
# Then it's langchainhub
if hasattr(client, "pull_repo"):
# >= 0.1.15
res_dict = client.pull_repo(owner_repo_commit)
obj = loads(json.dumps(res_dict["manifest"]))
if isinstance(obj, BasePromptTemplate):
if obj.metadata is None:
obj.metadata = {}
obj.metadata["lc_hub_owner"] = res_dict["owner"]
obj.metadata["lc_hub_repo"] = res_dict["repo"]
obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"]
return obj
# Then it's < 0.1.15 langchainhub
resp: str = client.pull(owner_repo_commit)
return loads(resp)
|
"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
def _get_client(
api_key: Optional[str] = None,
api_url: Optional[str] = None,
) -> Any:
try:
from langsmith import Client as LangSmithClient
ls_client = LangSmithClient(api_url, api_key=api_key)
if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"):
return ls_client
else:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError:
try:
from langchainhub import Client as LangChainHubClient
return LangChainHubClient(api_url, api_key=api_key)
except ImportError as e:
msg = (
"Could not import langsmith or langchainhub (deprecated),"
"please install with `pip install langsmith`."
)
raise ImportError(msg) from e
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = None,
new_repo_is_public: bool = False,
new_repo_description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
) -> str:
"""
Push an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the prompt to push to in the format of
`owner/prompt_name` or `prompt_name`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the prompt should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the prompt. Defaults to an empty
string.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "push_prompt"):
return client.push_prompt(
repo_full_name,
object=object,
parent_commit_hash=parent_commit_hash,
is_public=new_repo_is_public,
description=new_repo_description,
readme=readme,
tags=tags,
)
# Then it's langchainhub
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
include_model: Optional[bool] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pull an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the prompt to pull from in the format of
`owner/prompt_name:commit_hash` or `owner/prompt_name`
or just `prompt_name` if it's your own prompt.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_key=api_key, api_url=api_url)
# Then it's langsmith
if hasattr(client, "pull_prompt"):
response = client.pull_prompt(owner_repo_commit, include_model=include_model)
return response
# Then it's langchainhub
if hasattr(client, "pull_repo"):
# >= 0.1.15
res_dict = client.pull_repo(owner_repo_commit)
obj = loads(json.dumps(res_dict["manifest"]))
if isinstance(obj, BasePromptTemplate):
if obj.metadata is None:
obj.metadata = {}
obj.metadata["lc_hub_owner"] = res_dict["owner"]
obj.metadata["lc_hub_repo"] = res_dict["repo"]
obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"]
return obj
# Then it's < 0.1.15 langchainhub
resp: str = client.pull(owner_repo_commit)
return loads(resp)
|
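A hedged usage sketch of the `push`/`pull` pair above; the handle, prompt name, and template are placeholders, and pushing assumes a LangSmith API key is configured in the environment:

from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")

# Push under your own namespace; returns a browser URL for the new commit.
url = push("my-handle/joke-prompt", prompt)

# Pull it back (optionally pinned with `owner/name:commit_hash`).
same_prompt = pull("my-handle/joke-prompt")
print(same_prompt.invoke({"topic": "ducks"}))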
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, LOOPS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
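A minimal sketch of how the exported `Registry`/`build_from_cfg` pair is meant to be used; the `MODELS` registry below is created fresh for the example (not the imported root registry), and the import path assumes the package is installed as `mmengine`:

from mmengine.registry import Registry, build_from_cfg

MODELS = Registry('model')

@MODELS.register_module()
class ToyDetector:
    def __init__(self, depth: int = 50):
        self.depth = depth

# A config dict whose `type` key selects the registered class;
# the remaining keys become constructor arguments.
cfg = dict(type='ToyDetector', depth=101)
model = build_from_cfg(cfg, MODELS)
assert isinstance(model, ToyDetector) and model.depth == 101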
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
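The `_delete_=True` key in the backbone dict above tells the config system to replace the base file's backbone node wholesale rather than merge into it (otherwise stale ResNet keys from the base would survive the switch to RegNet). A toy merge function illustrating those semantics; this is an illustration, not OpenMMLab's implementation:

def merge(base: dict, child: dict) -> dict:
    """Recursively merge `child` into `base`; `_delete_=True` replaces the node."""
    if child.pop('_delete_', False):
        return child
    out = dict(base)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)
        else:
            out[key] = value
    return out

base = dict(backbone=dict(type='ResNet', depth=50, norm_eval=True))
child = dict(backbone=dict(_delete_=True, type='RegNet', arch='regnetx_3.2gf'))
print(merge(base, child))  # backbone is RegNet only; no stale ResNet keys remain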
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.csv.csv import Csv, CsvConfig
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
filename = tmp_path / "file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
filename = tmp_path / "malformed_file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
filename = tmp_path / "csv_with_image.csv"
data = textwrap.dedent(
f"""\
image
{image_file}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
filename = tmp_path / "csv_with_label.csv"
data = textwrap.dedent(
"""\
label
good
bad
good
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
filename = tmp_path / "csv_with_int_list.csv"
data = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = CsvConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = CsvConfig(name="name", data_files=data_files)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
csv = Csv()
generator = csv._generate_tables([[csv_file, malformed_csv_file]])
with pytest.raises(ValueError, match="Error tokenizing data"):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(malformed_csv_file) in record.message
for record in caplog.records
)
@require_pil
def test_csv_cast_image(csv_file_with_image):
with open(csv_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[1]
csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
generator = csv._generate_tables([[csv_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
with open(csv_file_with_label, encoding="utf-8") as f:
labels = f.read().splitlines()[1:]
csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
generator = csv._generate_tables([[csv_file_with_label]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
generated_content = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
generator = csv._generate_tables([[csv_file_with_int_list]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa.types.is_list(pa_table.schema.field("int_list").type)
generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
filename = tmp_path / "file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
filename = tmp_path / "malformed_file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
filename = tmp_path / "csv_with_image.csv"
data = textwrap.dedent(
f"""\
image
{image_file}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
filename = tmp_path / "csv_with_label.csv"
data = textwrap.dedent(
"""\
label
good
bad
good
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
filename = tmp_path / "csv_with_int_list.csv"
data = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
csv = Csv()
generator = csv._generate_tables([[csv_file, malformed_csv_file]])
with pytest.raises(ValueError, match="Error tokenizing data"):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(malformed_csv_file) in record.message
for record in caplog.records
)
@require_pil
def test_csv_cast_image(csv_file_with_image):
with open(csv_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[1]
csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
generator = csv._generate_tables([[csv_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
with open(csv_file_with_label, encoding="utf-8") as f:
labels = f.read().splitlines()[1:]
csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
generator = csv._generate_tables([[csv_file_with_label]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
generated_content = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
generator = csv._generate_tables([[csv_file_with_int_list]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa.types.is_list(pa_table.schema.field("int_list").type)
generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
|
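The `converters` keyword exercised in `test_csv_convert_int_list` is forwarded by the `Csv` builder to `pandas.read_csv`, so the same transformation can be previewed with pandas alone:

import io
import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n"
df = pd.read_csv(
    io.StringIO(csv_text),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
print(df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6]]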
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import (DetVisualizationHook,
GroundingVisualizationHook,
TrackVisualizationHook)
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook', 'TrackVisualizationHook',
'GroundingVisualizationHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook, TrackVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook', 'TrackVisualizationHook'
]
|
_base_ = 'solov2_r50_fpn_1x_coco.py'
# model settings
model = dict(
mask_head=dict(
stacked_convs=2,
feat_channels=256,
scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)),
mask_feature_head=dict(out_channels=128)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384),
(768, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(448, 768), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = 'solov2_r50_fpn_1x_coco.py'
# model settings
model = dict(
mask_head=dict(
stacked_convs=2,
feat_channels=256,
scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)),
mask_feature_head=dict(out_channels=128)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
# TODO: Update after mmcv.RandomChoiceResize finish refactor
type='RandomChoiceResize',
scales=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384),
(768, 352)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(448, 768), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
from __future__ import annotations
from typing import TYPE_CHECKING, Tuple, Union
from langchain_core.structured_query import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
if TYPE_CHECKING:
from timescale_vector import client
class TimescaleVectorTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
"""Subset of allowed logical operators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
COMPARATOR_MAP = {
Comparator.EQ: "==",
Comparator.GT: ">",
Comparator.GTE: ">=",
Comparator.LT: "<",
Comparator.LTE: "<=",
}
OPERATOR_MAP = {Operator.AND: "AND", Operator.OR: "OR", Operator.NOT: "NOT"}
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
if isinstance(func, Operator):
value = self.OPERATOR_MAP[func.value] # type: ignore[index]
elif isinstance(func, Comparator):
value = self.COMPARATOR_MAP[func.value] # type: ignore[index]
return f"{value}"
def visit_operation(self, operation: Operation) -> client.Predicates:
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
"Cannot import timescale-vector. Please install with `pip install "
"timescale-vector`."
) from e
args = [arg.accept(self) for arg in operation.arguments]
return client.Predicates(*args, operator=self._format_func(operation.operator))
def visit_comparison(self, comparison: Comparison) -> client.Predicates:
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
"Cannot import timescale-vector. Please install with `pip install "
"timescale-vector`."
) from e
return client.Predicates(
(
comparison.attribute,
self._format_func(comparison.comparator),
comparison.value,
)
)
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"predicates": structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
from __future__ import annotations
from typing import TYPE_CHECKING, Tuple, Union
from langchain_core.structured_query import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
if TYPE_CHECKING:
from timescale_vector import client
class TimescaleVectorTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
"""Subset of allowed logical operators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
COMPARATOR_MAP = {
Comparator.EQ: "==",
Comparator.GT: ">",
Comparator.GTE: ">=",
Comparator.LT: "<",
Comparator.LTE: "<=",
}
OPERATOR_MAP = {Operator.AND: "AND", Operator.OR: "OR", Operator.NOT: "NOT"}
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
if isinstance(func, Operator):
value = self.OPERATOR_MAP[func.value] # type: ignore
elif isinstance(func, Comparator):
value = self.COMPARATOR_MAP[func.value] # type: ignore
return f"{value}"
def visit_operation(self, operation: Operation) -> client.Predicates:
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
"Cannot import timescale-vector. Please install with `pip install "
"timescale-vector`."
) from e
args = [arg.accept(self) for arg in operation.arguments]
return client.Predicates(*args, operator=self._format_func(operation.operator))
def visit_comparison(self, comparison: Comparison) -> client.Predicates:
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
"Cannot import timescale-vector. Please install with `pip install "
"timescale-vector`."
) from e
return client.Predicates(
(
comparison.attribute,
self._format_func(comparison.comparator),
comparison.value,
)
)
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"predicates": structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
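A hedged sketch of driving the translator above with a structured query; evaluating the filter constructs `client.Predicates`, which requires the optional `timescale-vector` package to be installed:

from langchain_core.structured_query import Comparator, Comparison, StructuredQuery

translator = TimescaleVectorTranslator()

# "year > 2020" expressed as a structured-query comparison.
query = StructuredQuery(
    query="dinosaur discoveries",
    filter=Comparison(comparator=Comparator.GT, attribute="year", value=2020),
    limit=None,
)
# Returns the raw query string plus {"predicates": client.Predicates(("year", ">", 2020))}.
search_query, kwargs = translator.visit_structured_query(query)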
import importlib
class LazyModule:
def __init__(self, name, pip_name=None, import_error_msg=None):
self.name = name
self.pip_name = pip_name or name
self.import_error_msg = import_error_msg or (
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(self.import_error_msg)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
def __repr__(self):
return f"LazyModule({self.name})"
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
torch_xla = LazyModule(
"torch_xla",
import_error_msg=(
"This requires the torch_xla module. You can install it via "
"`pip install torch-xla`. Additionally, you may need to update "
"LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, "
"_XLAC.so, which needs to link to the version of Python it was built "
"with. Use the following command to update LD_LIBRARY_PATH: "
"`export LD_LIBRARY_PATH=<path to Python>/lib:$LD_LIBRARY_PATH`"
),
)
optree = LazyModule("optree")
dmtree = LazyModule("tree")
tf2onnx = LazyModule("tf2onnx")
|
import importlib
class LazyModule:
def __init__(self, name, pip_name=None, import_error_msg=None):
self.name = name
self.pip_name = pip_name or name
self.import_error_msg = import_error_msg or (
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(self.import_error_msg)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
def __repr__(self):
return f"LazyModule({self.name})"
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
torch_xla = LazyModule(
"torch_xla",
import_error_msg=(
"This requires the torch_xla module. You can install it via "
"`pip install torch-xla`. Additionally, you may need to update "
"LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, "
"_XLAC.so, which needs to link to the version of Python it was built "
"with. Use the following command to update LD_LIBRARY_PATH: "
"`export LD_LIBRARY_PATH=<path to Python>/lib:$LD_LIBRARY_PATH`"
),
)
optree = LazyModule("optree")
dmtree = LazyModule("tree")
|
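A short sketch of the lazy-import pattern above in use: `available` probes the import without raising, and the first real attribute access triggers `initialize`:

# `scipy` here is the LazyModule instance defined above, not the real module yet.
if scipy.available:             # attempts the import once and caches the result
    print(scipy.__version__)    # first attribute access performs the real import
else:
    print("scipy is not installed; skipping")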
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""*LibriMix* :cite:`cosentino2020librimix` dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``"train-360"``, ``"train-100"``,
``"dev"``, and ``"test"``] (Default: ``"train-360"``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any audio file has a different
            sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``"enh_single"``, ``"enh_both"``, ``"sep_clean"``, ``"sep_noisy"``]
(Default: ``"sep_clean"``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
int:
Sample rate
Tensor:
Mixture waveform
list of Tensors:
List of source waveforms
"""
return self._load_sample(self.files[key])
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* :cite:`cosentino2020librimix` dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any audio file has a different
            sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
|
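A hedged usage sketch of the dataset class above; the root path is a placeholder, and the LibriMix directories must already have been generated as the docstring notes:

dataset = LibriMix(
    "/data", subset="dev", num_speakers=2, sample_rate=8000, task="sep_clean"
)
sample_rate, mixture, sources = dataset[0]
print(sample_rate)      # 8000
print(mixture.shape)    # (channels, num_frames)
print(len(sources))     # 2 source waveforms, same shape as the mixture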
import warnings
from abc import ABC
from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.audio_bytes import AudioBytes
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self) -> 'AudioBytes':
"""
Convert audio tensor to AudioBytes.
"""
from docarray.typing.bytes.audio_bytes import AudioBytes
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return AudioBytes(tensor.tobytes())
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
:param file_path: path to an audio file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
if TYPE_CHECKING:
import pydub
else:
pydub = import_library('pydub', raise_error=True)
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = pydub.AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from abc import ABC
from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
:param file_path: path to an audio file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
if TYPE_CHECKING:
import pydub
else:
pydub = import_library('pydub', raise_error=True)
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = pydub.AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
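A hedged end-to-end sketch of the tensor-to-file path above, assuming docarray's `AudioNdArray` as the concrete tensor subclass and `pydub` (with ffmpeg) installed; `parse_obj_as` is the pydantic-v1-style constructor docarray documents for its tensor types:

import numpy as np
from pydantic import parse_obj_as
from docarray.typing import AudioNdArray  # assumed concrete AbstractAudioTensor subclass

# One second of a 440 Hz tone scaled to [-1, 1], the range to_bytes expects.
t = np.linspace(0.0, 1.0, 44100, endpoint=False)
tone = parse_obj_as(AudioNdArray, 0.5 * np.sin(2 * np.pi * 440.0 * t))
tone.save("tone.wav", format="wav", frame_rate=44100, sample_width=2)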